##// END OF EJS Templates
perfrevlogwrite: use progress helper on modern hg...
Martin von Zweigbergk -
r41191:f36fd52d default
parent child Browse files
Show More
@@ -1,2662 +1,2675
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance'''
2 '''helper extension to measure performance'''
3
3
4 # "historical portability" policy of perf.py:
4 # "historical portability" policy of perf.py:
5 #
5 #
6 # We have to do:
6 # We have to do:
7 # - make perf.py "loadable" with as wide Mercurial version as possible
7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 # This doesn't mean that perf commands work correctly with that Mercurial.
8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 # - make historical perf command work correctly with as wide Mercurial
10 # - make historical perf command work correctly with as wide Mercurial
11 # version as possible
11 # version as possible
12 #
12 #
13 # We have to do, if possible with reasonable cost:
13 # We have to do, if possible with reasonable cost:
14 # - make recent perf command for historical feature work correctly
14 # - make recent perf command for historical feature work correctly
15 # with early Mercurial
15 # with early Mercurial
16 #
16 #
17 # We don't have to do:
17 # We don't have to do:
18 # - make perf command for recent feature work correctly with early
18 # - make perf command for recent feature work correctly with early
19 # Mercurial
19 # Mercurial
20
20
21 from __future__ import absolute_import
21 from __future__ import absolute_import
22 import contextlib
22 import contextlib
23 import functools
23 import functools
24 import gc
24 import gc
25 import os
25 import os
26 import random
26 import random
27 import shutil
27 import shutil
28 import struct
28 import struct
29 import sys
29 import sys
30 import tempfile
30 import tempfile
31 import threading
31 import threading
32 import time
32 import time
33 from mercurial import (
33 from mercurial import (
34 changegroup,
34 changegroup,
35 cmdutil,
35 cmdutil,
36 commands,
36 commands,
37 copies,
37 copies,
38 error,
38 error,
39 extensions,
39 extensions,
40 hg,
40 hg,
41 mdiff,
41 mdiff,
42 merge,
42 merge,
43 revlog,
43 revlog,
44 util,
44 util,
45 )
45 )
46
46
47 # for "historical portability":
47 # for "historical portability":
48 # try to import modules separately (in dict order), and ignore
48 # try to import modules separately (in dict order), and ignore
49 # failure, because these aren't available with early Mercurial
49 # failure, because these aren't available with early Mercurial
50 try:
50 try:
51 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 from mercurial import branchmap # since 2.5 (or bcee63733aad)
52 except ImportError:
52 except ImportError:
53 pass
53 pass
54 try:
54 try:
55 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
56 except ImportError:
56 except ImportError:
57 pass
57 pass
58 try:
58 try:
59 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 from mercurial import registrar # since 3.7 (or 37d50250b696)
60 dir(registrar) # forcibly load it
60 dir(registrar) # forcibly load it
61 except ImportError:
61 except ImportError:
62 registrar = None
62 registrar = None
63 try:
63 try:
64 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
65 except ImportError:
65 except ImportError:
66 pass
66 pass
67 try:
67 try:
68 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
69 except ImportError:
69 except ImportError:
70 pass
70 pass
71 try:
71 try:
72 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
72 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
73 except ImportError:
73 except ImportError:
74 pass
74 pass
75
75
76
76
def identity(a):
    """Return *a* unchanged.

    Used as a no-op stand-in for pycompat conversion helpers when
    running under a Mercurial too old to provide them.
    """
    return a
79
79
# for "historical portability":
# the pycompat helpers appeared gradually; fall back to Python-2-only
# equivalents when running under a Mercurial too old to provide them.
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    # pycompat missing (or one of its attributes is): assume Python 2,
    # where the stdlib/builtin equivalents are available directly.
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange
99
99
# for "historical portability": pick whichever queue implementation this
# Mercurial exposes.
try:
    # 4.7+: pycompat.queue is the stdlib queue module
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        # pycompat predates the queue alias entirely; use util's copy
        queue = util.queue
109
109
# for "historical portability": maketemplater moved from cmdutil to
# logcmdutil; very old Mercurial has neither.
try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        # pre-logcmdutil location
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        # too old to have a log templater at all
        makelogtemplater = None
118
118
119 # for "historical portability":
119 # for "historical portability":
120 # define util.safehasattr forcibly, because util.safehasattr has been
120 # define util.safehasattr forcibly, because util.safehasattr has been
121 # available since 1.9.3 (or 94b200a11cf7)
121 # available since 1.9.3 (or 94b200a11cf7)
_undefined = object()

def safehasattr(thing, attr):
    """Portable hasattr() accepting a bytes attribute name."""
    found = getattr(thing, _sysstr(attr), _undefined)
    return found is not _undefined

setattr(util, 'safehasattr', safehasattr)
126
126
127 # for "historical portability":
127 # for "historical portability":
128 # define util.timer forcibly, because util.timer has been available
128 # define util.timer forcibly, because util.timer has been available
129 # since ae5d60bb70c9
129 # since ae5d60bb70c9
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    # BUG FIX: os.name is a native str on both Python 2 and 3, so the
    # previous comparison against b'nt' could never match on Python 3
    # and Windows silently fell through to time.time.  Compare with a
    # native string literal instead.
    util.timer = time.clock
else:
    util.timer = time.time
136
136
137 # for "historical portability":
137 # for "historical portability":
138 # use locally defined empty option list, if formatteropts isn't
138 # use locally defined empty option list, if formatteropts isn't
139 # available, because commands.formatteropts has been available since
139 # available, because commands.formatteropts has been available since
140 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
140 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
141 # available since 2.2 (or ae5f92e154d3)
141 # available since 2.2 (or ae5f92e154d3)
# prefer whichever module currently hosts the option list; fall back to
# no formatting options on very old Mercurial.
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
]))
156
156
cmdtable = {}

# for "historical portability":
# cmdutil.parsealiases has only existed since 1.5 (or 6252852b4332),
# so define a local equivalent.
def parsealiases(cmd):
    """Split a declaration like b"name|alias1|alias2" into its names."""
    return cmd.split(b"|")
164
164
# pick (or synthesize) an @command decorator appropriate for this
# Mercurial, newest API first.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                # emulate norepo by registering the aliases on
                # commands.norepo, which is how old hg tracked them
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
192
192
# declare the perf.* config items when this Mercurial supports config
# registration; silently skip on older versions (unregistered config
# reads still work there).
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass
212
212
def getlen(ui):
    """Return a length function: the builtin len, or a constant 1 when
    perf.stub is set (so stub runs produce stable output)."""
    if ui.configbool(b"perf", b"stub", False):
        return lambda sequence: 1
    return len
217
217
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                # always falsy, like a plain (non-templated) formatter
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
283
283
def stub_timer(fm, func, setup=None, title=None):
    """Timer replacement for perf.stub mode: run *func* exactly once,
    with no timing and no formatter output (*fm* and *title* unused)."""
    if setup is not None:
        setup()
    func()
288
288
@contextlib.contextmanager
def timeone():
    """Time the enclosed block.

    Yields a list that, after the block finishes, holds one
    (wallclock, user-cpu, system-cpu) tuple.
    """
    result = []
    os_before = os.times()
    clock_before = util.timer()
    yield result
    clock_after = util.timer()
    os_after = os.times()
    result.append((clock_after - clock_before,
                   os_after[0] - os_before[0],
                   os_after[1] - os_before[1]))
299
299
def _timer(fm, func, setup=None, title=None, displayall=False):
    """Repeatedly time func() and report the results through fm.

    Sampling stops once at least 100 runs fit in the first 3 seconds,
    or, for slower funcs, once at least 3 runs have taken over 10
    seconds total.
    """
    # collect garbage up front so earlier allocations don't skew runs
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()  # per-iteration setup is excluded from the timing
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    # report the last run's return value alongside the timings
    formatone(fm, results, title=title, result=r,
              displayall=displayall)
320
320
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings to formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples.  The best (lowest
    wall-clock) run is always shown; with *displayall*, the max, average
    and median are shown as well.  *result* is an optional return value
    of the timed function, echoed for reference.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        # role-prefixed field names, except 'best' which keeps the
        # historical unprefixed names for output compatibility
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    # FIX: sort a copy rather than timings.sort(); the previous in-place
    # sort reordered the caller's list as a hidden side effect.
    ordered = sorted(timings)
    display(b'best', ordered[0])
    if displayall:
        display(b'max', ordered[-1])
        avg = tuple([sum(x) / count for x in zip(*ordered)])
        display(b'avg', avg)
        median = ordered[len(ordered) // 2]
        display(b'median', median)
352
352
353 # utilities for historical portability
353 # utilities for historical portability
354
354
def getint(ui, section, name, default):
    """Read an integer config value, tolerating pre-1.9 Mercurial.

    ui.configint has only been available since 1.9 (or fa2b596db182),
    so parse the raw string value ourselves.  Raises ConfigError when
    the value is set but not an integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
366
366
def safeattrsetter(obj, name, ignoremissing=False):
    """Return a set/restore helper for attribute *name* of *obj*.

    Aborts if *obj* lacks the attribute at runtime, so that removal of
    an attribute in a future Mercurial can't silently invalidate a
    measurement.  The returned object can (1) assign a new value and
    (2) restore the original value of the attribute.

    With ignoremissing=True a missing attribute returns None instead of
    aborting, which is useful for attributes not present in every
    Mercurial version.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
396
396
397 # utilities to examine each internal API changes
397 # utilities to examine each internal API changes
398
398
def getbranchmapsubsettable():
    """Locate the 'subsettable' mapping wherever this Mercurial keeps it.

    For "historical portability", subsettable is defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    """
    for candidate in (branchmap, repoview):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
414
414
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    For "historical portability": repo.svfs has been available since
    2.3 (or 7034365089bf); older repos expose 'sopener' instead.
    """
    store_vfs = getattr(repo, 'svfs', None)
    if store_vfs:
        return store_vfs
    return getattr(repo, 'sopener')
425
425
def getvfs(repo):
    """Return appropriate object to access files under .hg

    For "historical portability": repo.vfs has been available since
    2.3 (or 7034365089bf); older repos expose 'opener' instead.
    """
    working_vfs = getattr(repo, 'vfs', None)
    if working_vfs:
        return working_vfs
    return getattr(repo, 'opener')
436
436
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # the correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            # BUG FIX: __dict__/vars() keys are native strs, so the
            # previous b'_tagscache' lookup could never match on
            # Python 3, silently making this a no-op.  Use a native
            # string key on both Python versions.
            if r'_tagscache' in vars(repo):
                del repo.__dict__[r'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
465
465
466 # utilities to clear cache
466 # utilities to clear cache
467
467
def clearfilecache(obj, attrname):
    """Drop a @filecache'd property *attrname* from *obj*.

    Operates on the unfiltered repo when *obj* supports it, removing
    both the memoized attribute and its _filecache entry.
    """
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = getunfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
475
475
def clearchangelog(repo):
    """Invalidate any cached changelog on *repo*."""
    if repo is not repo.unfiltered():
        # a filtered repoview memoizes the changelog under these slots;
        # bypass any property machinery when resetting them
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
481
481
482 # perf commands
482 # perf commands
483
483
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """Benchmark a full dirstate walk over files matching *pats*."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
492
492
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """Benchmark annotating file *f* at the working directory parent."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
500
500
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    # Benchmark repo.status(); with --unknown, status additionally scans
    # for untracked files, which is the expensive part of the walk.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    unknown = opts[b'unknown']
    def d():
        return sum(len(component) for component in repo.status(unknown=unknown))
    timer(d)
    fm.end()
512
512
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # Benchmark a dry-run addremove over the whole working copy.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Fix: save the old quiet flag BEFORE entering the try block.  In the
    # original, `oldquiet` was assigned inside the try, so an exception on
    # that first line would reach the finally clause with `oldquiet`
    # unbound and raise a NameError that masks the real error.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True  # silence addremove's per-file output
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True  # never actually modify the dirstate
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
526
526
def clearcaches(cl):
    """Clear a changelog/revlog's node lookup caches.

    Kept tolerant of internal API changes so the benchmarks behave
    somewhat consistently across Mercurial versions.
    """
    if util.safehasattr(cl, b'clearcaches'):
        # modern revlogs expose a dedicated cache-clearing entry point
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older hg: reset the node cache to its pristine state by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
535
535
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    # Time computing the head revisions; caches are cleared after every
    # run so each iteration pays the full cost.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog
    def run():
        len(changelog.headrevs())
        clearcaches(changelog)
    timer(run)
    fm.end()
546
546
@command(b'perftags', formatteropts+
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    # Benchmark tag computation; --clear-revlogs also re-reads the
    # changelog and manifest from disk before each run.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def run():
        return len(repo.tags())
    timer(run, setup=setup)
    fm.end()
565
565
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # Walk the full lazy ancestor set of all heads, i.e. the whole DAG.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for anc in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
576
576
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # Time membership tests of REVSET's revisions against a freshly
    # created lazy ancestor set of all heads.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        ancs = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancs
    timer(d)
    fm.end()
589
589
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # Fix: convert kwargs to bytes keys like every other perf command.
    # Without this, gettimer()/the formatter look up bytes keys (e.g.
    # b'template') in a str-keyed dict on py3, so -T was silently ignored.
    opts = _byteskwargs(opts)
    repos = [repo, None]  # slot 1 is filled with a fresh peer per run
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # reconnect in setup so connection cost is excluded from timing
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()
604
604
@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so each run re-parses the file
        clearfilecache(repo, b'_bookmarks')
    def run():
        repo._bookmarks
    timer(run, setup=setup)
    fm.end()
623
623
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # --- benchmark factories -------------------------------------------

    def makebench(fn):
        # open + unwrap the bundle, then hand it to *fn*
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # drain the bundle stream in *size*-byte reads
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads with no bundle machinery at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read baselines always apply, whatever the bundle type
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to pick the matching benchmark set
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
741
741
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # generate and exhaust the chunk stream without keeping it
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
772
772
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # Time dirstate.hasdir() with the directory cache rebuilt every run.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force the dirstate to be loaded up front
    def d():
        dirstate.hasdir(b'a')
        # drop the cached dir map so the next run recomputes it
        del dirstate._map._dirs
    timer(d)
    fm.end()
784
784
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    # Time loading the dirstate from disk: invalidate, then first lookup.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # warm OS-level caches before timing
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()
795
795
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    # Time rebuilding the dirstate's directory map from scratch.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # make sure the dirstate itself is loaded
    def d():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs  # force a rebuild next run
    timer(d)
    fm.end()
806
806
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    # Time building the case-folding map for files in the dirstate.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # preload the dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap  # recompute from scratch each run
    timer(d)
    fm.end()
818
818
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    # Time building the case-folding map for directories, which also
    # requires the directory map itself.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # preload the dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both caches so each run pays the full computation
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()
831
831
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    # Time serializing the dirstate back to disk.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # make sure the dirstate is loaded before timing writes
    def d():
        ds._dirty = True  # force a real write even though nothing changed
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
843
843
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    # Benchmark merge.calculateupdates() between the working copy and REV.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so
    # prime that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the
        # middle of our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
862
862
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    timer(lambda: copies.pathcopies(ctx1, ctx2))
    fm.end()
874
874
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # with --full, also drop and re-read the phase file itself
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
893
893
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get(b'branch') or [])
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # count remote phase roots that are known locally and non-public
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
951
951
@command(b'perfmanifest', [
         (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
         (b'', b'clear-disk', False, b'clear on-disk caches too'),
         ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; take its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # a full hex manifest node was given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older hg exposed the manifest revlog directly
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
987
987
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    # Time reading (parsing) a single changelog entry.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
    timer(d)
    fm.end()
998
998
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop the cached matcher so each run re-parses .hgignore
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1015
1015
@command(b'perfindex', [
         (b'', b'rev', b'', b'revision to be looked up (default tip)'),
        ] + formatteropts)
def perfindex(ui, repo, **opts):
    # Benchmark node->rev lookup through a freshly constructed changelog.
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    if opts[b'rev'] is None:
        n = repo[b"tip"].node()
    else:
        rev = scmutil.revsingle(repo, opts[b'rev'])
        n = repo[rev].node()

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1042
1042
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a bare `hg version` invocation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # HGRCPATH= avoids timing user/system config parsing
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1056
1056
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark looking up the parents of the first N changelog nodes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()
1073
1073
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the files touched by changeset `x`"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()
1083
1083
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list of changeset `x` from the changelog"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        # read() returns a tuple; index 3 is the list of touched files
        len(cl.read(x)[3])
    timer(d)
    fm.end()
1094
1094
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision symbol to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1101
1101
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a fixed pseudo-random sequence of linelog edits"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every run (and every host) replays the same edit stream
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1135
1135
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specs with scmutil.revrange"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1143
1143
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark node->rev lookup on a freshly built changelog revlog

    Caches are cleared after each lookup so every run pays the full cost.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()
1157
1157
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a full `hg log` run (output discarded)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    # buffer the output so terminal I/O does not pollute the timing
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1171
1171
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
1186
1186
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render to a null ui so output cost is excluded from the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1220
1220
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # only merges are interesting revisions for copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1296
1296
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor for the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()
1303
1303
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()
1313
1313
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache inside a transaction

    Takes the repo lock and backs up the fncache so the repository is left
    intact once the transaction closes.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        # force a rewrite even though the content is unchanged
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1330
1330
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark path-encoding every entry currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()
1342
1342
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker thread body for perfbdiff's threaded mode.

    Pulls (text, text) pairs from queue ``q`` and diffs them with the
    algorithm selected by the ``xdiff``/``blocks`` flags. A ``None`` item
    marks the end of one timed batch: the worker then parks on the ``ready``
    condition until the driver wakes it for the next batch. The loop exits
    once the ``done`` event is set.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1358
1358
def _manifestrevision(repo, mnode):
    """Return the raw manifest text for manifest node ``mnode``.

    Uses ``getstorage`` on modern manifestlog objects and falls back to the
    private ``_revlog`` attribute on older Mercurial versions.
    """
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)
1368
1368
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # spin up the worker pool once; each timed run only feeds the queue
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down cleanly
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1469
1469
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1535
1535
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # time `hg diff` under each whitespace-option combination
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1557
1557
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # First 4 bytes of a revlog index encode flags (high 16 bits) and
    # format version (low 16 bits).
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # Sample nodes at various positions to measure lookup cost across
    # the index.
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1675
1675
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # Negative startrev counts back from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1717
1717
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'last revision to write'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # Negative revisions count back from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # results[i] is (rev, [timing from each run for that rev])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: the 50th percentile was previously computed with 70.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
        )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1825
1825
1826 class _faketr(object):
1826 class _faketr(object):
1827 def add(s, x, y, z=None):
1827 def add(s, x, y, z=None):
1828 return None
1828 return None
1829
1829
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Time re-adding revisions [startrev, stoprev] of ``orig`` to a copy.

    Returns a list of (rev, timing) pairs, one per revision written.
    ``runidx``, if given, is shown in the progress topic to distinguish
    repeated runs.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # Only the addrawrevision call itself is timed; seed
            # preparation and cache clearing are excluded.
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
1853
1866
def _getrevisionseed(orig, rev, tr, source):
    """Build (args, kwargs) for ``addrawrevision`` of ``rev`` from ``orig``.

    ``source`` selects how the revision data is supplied: a full text,
    a delta against one of the parents, or the delta already stored in
    the revlog (see ``perfrevlogwrite`` for the value semantics).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # Fall back to p1 when there is no second parent.
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1892
1905
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a temporary copy of revlog ``orig`` truncated at ``truncaterev``.

    The copy lives in a temp directory that is removed on exit. Inline
    revlogs are not supported (index and data must be separate files so
    they can be truncated independently).
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
1939
1952
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # Default to every available engine that supports revlog compression.
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Inline revlogs keep data in the index file.
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2057
2070
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice the on-disk segments back into per-revision raw chunks
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        append = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                append(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # NOTE(review): the `chain` argument is unused; this iterates the
        # closed-over `slicedchain` instead — preserved as-is.
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules across hg versions
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if getattr(r, '_withsparseread', False):
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    else:
        slicedchain = (chain,)
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # the slicing step only exists with sparse-read support
    if getattr(r, '_withsparseread', False):
        benches.append((lambda: doslice(r, chain, size),
                        b'slice-sparse-chain'))

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2193
2206
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option to evaluate the impact of the volatile revision
    set cache on revset execution. The volatile cache holds filtered and
    obsolescence related caches."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            # drop the volatile (filtered/obsolete) caches so each run
            # pays the recomputation cost
            repo.invalidatevolatilesets()
        if contexts:
            # additionally build a changectx for every matched revision
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass
    timer(d)
    fm.end()
2216
2229
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def mkobsbench(setname):
        # build a closure timing one obsolescence-related set computation
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, setname)
        return run

    obsnames = sorted(obsolete.cachefuncs)
    if names:
        obsnames = [n for n in obsnames if n in names]

    for setname in obsnames:
        timer(mkobsbench(setname), title=setname)

    def mkfilterbench(filtername):
        # build a closure timing one repoview filter computation
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, filtername)
        return run

    filternames = sorted(repoview.filtertable)
    if names:
        filternames = [n for n in filternames if n in names]

    for filtername in filternames:
        timer(mkfilterbench(filtername), title=filtername)
    fm.end()
2258
2271
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        view = repo if filtername is None else repo.filtered(filtername)
        def run():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so subsets are rebuilt too
                view._branchcaches.clear()
            else:
                # only invalidate the one being measured
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return run

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending, so filters are
        # ordered smallest-subset first
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap read/write so only computation is timed
    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = b'unfiltered' if name is None else name
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2327
2340
@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each runs')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmark will actually feed to branchcache.update()
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # temporary repoview filters exposing exactly the base/target subsets
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset were found, build from scratch
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2431
2444
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    # NOTE: `filter` and `list` shadow builtins, but their names are tied to
    # the CLI flags above and cannot be renamed without changing the interface
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # just enumerate the on-disk branchmap cache files and exit
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while branchmap.read(repo) is None:
        # fall back to the nearest subset that has a cached branchmap
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        branchmap.read(repo)
    timer(bench, setup=setup)
    fm.end()
2476
2489
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)

    def countmarkers():
        # instantiating obsstore parses every marker from disk
        return len(obsolete.obsstore(storevfs))

    timer(countmarkers)
    fm.end()
2486
2499
2487 @command(b'perflrucachedict', formatteropts +
2500 @command(b'perflrucachedict', formatteropts +
2488 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2501 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2489 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2502 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2490 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2503 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2491 (b'', b'size', 4, b'size of cache'),
2504 (b'', b'size', 4, b'size of cache'),
2492 (b'', b'gets', 10000, b'number of key lookups'),
2505 (b'', b'gets', 10000, b'number of key lookups'),
2493 (b'', b'sets', 10000, b'number of key sets'),
2506 (b'', b'sets', 10000, b'number of key sets'),
2494 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2507 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2495 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2508 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2496 norepo=True)
2509 norepo=True)
2497 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2510 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2498 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2511 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2499 opts = _byteskwargs(opts)
2512 opts = _byteskwargs(opts)
2500
2513
2501 def doinit():
2514 def doinit():
2502 for i in _xrange(10000):
2515 for i in _xrange(10000):
2503 util.lrucachedict(size)
2516 util.lrucachedict(size)
2504
2517
2505 costrange = list(range(mincost, maxcost + 1))
2518 costrange = list(range(mincost, maxcost + 1))
2506
2519
2507 values = []
2520 values = []
2508 for i in _xrange(size):
2521 for i in _xrange(size):
2509 values.append(random.randint(0, _maxint))
2522 values.append(random.randint(0, _maxint))
2510
2523
2511 # Get mode fills the cache and tests raw lookup performance with no
2524 # Get mode fills the cache and tests raw lookup performance with no
2512 # eviction.
2525 # eviction.
2513 getseq = []
2526 getseq = []
2514 for i in _xrange(gets):
2527 for i in _xrange(gets):
2515 getseq.append(random.choice(values))
2528 getseq.append(random.choice(values))
2516
2529
2517 def dogets():
2530 def dogets():
2518 d = util.lrucachedict(size)
2531 d = util.lrucachedict(size)
2519 for v in values:
2532 for v in values:
2520 d[v] = v
2533 d[v] = v
2521 for key in getseq:
2534 for key in getseq:
2522 value = d[key]
2535 value = d[key]
2523 value # silence pyflakes warning
2536 value # silence pyflakes warning
2524
2537
2525 def dogetscost():
2538 def dogetscost():
2526 d = util.lrucachedict(size, maxcost=costlimit)
2539 d = util.lrucachedict(size, maxcost=costlimit)
2527 for i, v in enumerate(values):
2540 for i, v in enumerate(values):
2528 d.insert(v, v, cost=costs[i])
2541 d.insert(v, v, cost=costs[i])
2529 for key in getseq:
2542 for key in getseq:
2530 try:
2543 try:
2531 value = d[key]
2544 value = d[key]
2532 value # silence pyflakes warning
2545 value # silence pyflakes warning
2533 except KeyError:
2546 except KeyError:
2534 pass
2547 pass
2535
2548
2536 # Set mode tests insertion speed with cache eviction.
2549 # Set mode tests insertion speed with cache eviction.
2537 setseq = []
2550 setseq = []
2538 costs = []
2551 costs = []
2539 for i in _xrange(sets):
2552 for i in _xrange(sets):
2540 setseq.append(random.randint(0, _maxint))
2553 setseq.append(random.randint(0, _maxint))
2541 costs.append(random.choice(costrange))
2554 costs.append(random.choice(costrange))
2542
2555
2543 def doinserts():
2556 def doinserts():
2544 d = util.lrucachedict(size)
2557 d = util.lrucachedict(size)
2545 for v in setseq:
2558 for v in setseq:
2546 d.insert(v, v)
2559 d.insert(v, v)
2547
2560
2548 def doinsertscost():
2561 def doinsertscost():
2549 d = util.lrucachedict(size, maxcost=costlimit)
2562 d = util.lrucachedict(size, maxcost=costlimit)
2550 for i, v in enumerate(setseq):
2563 for i, v in enumerate(setseq):
2551 d.insert(v, v, cost=costs[i])
2564 d.insert(v, v, cost=costs[i])
2552
2565
2553 def dosets():
2566 def dosets():
2554 d = util.lrucachedict(size)
2567 d = util.lrucachedict(size)
2555 for v in setseq:
2568 for v in setseq:
2556 d[v] = v
2569 d[v] = v
2557
2570
2558 # Mixed mode randomly performs gets and sets with eviction.
2571 # Mixed mode randomly performs gets and sets with eviction.
2559 mixedops = []
2572 mixedops = []
2560 for i in _xrange(mixed):
2573 for i in _xrange(mixed):
2561 r = random.randint(0, 100)
2574 r = random.randint(0, 100)
2562 if r < mixedgetfreq:
2575 if r < mixedgetfreq:
2563 op = 0
2576 op = 0
2564 else:
2577 else:
2565 op = 1
2578 op = 1
2566
2579
2567 mixedops.append((op,
2580 mixedops.append((op,
2568 random.randint(0, size * 2),
2581 random.randint(0, size * 2),
2569 random.choice(costrange)))
2582 random.choice(costrange)))
2570
2583
2571 def domixed():
2584 def domixed():
2572 d = util.lrucachedict(size)
2585 d = util.lrucachedict(size)
2573
2586
2574 for op, v, cost in mixedops:
2587 for op, v, cost in mixedops:
2575 if op == 0:
2588 if op == 0:
2576 try:
2589 try:
2577 d[v]
2590 d[v]
2578 except KeyError:
2591 except KeyError:
2579 pass
2592 pass
2580 else:
2593 else:
2581 d[v] = v
2594 d[v] = v
2582
2595
2583 def domixedcost():
2596 def domixedcost():
2584 d = util.lrucachedict(size, maxcost=costlimit)
2597 d = util.lrucachedict(size, maxcost=costlimit)
2585
2598
2586 for op, v, cost in mixedops:
2599 for op, v, cost in mixedops:
2587 if op == 0:
2600 if op == 0:
2588 try:
2601 try:
2589 d[v]
2602 d[v]
2590 except KeyError:
2603 except KeyError:
2591 pass
2604 pass
2592 else:
2605 else:
2593 d.insert(v, v, cost=cost)
2606 d.insert(v, v, cost=cost)
2594
2607
2595 benches = [
2608 benches = [
2596 (doinit, b'init'),
2609 (doinit, b'init'),
2597 ]
2610 ]
2598
2611
2599 if costlimit:
2612 if costlimit:
2600 benches.extend([
2613 benches.extend([
2601 (dogetscost, b'gets w/ cost limit'),
2614 (dogetscost, b'gets w/ cost limit'),
2602 (doinsertscost, b'inserts w/ cost limit'),
2615 (doinsertscost, b'inserts w/ cost limit'),
2603 (domixedcost, b'mixed w/ cost limit'),
2616 (domixedcost, b'mixed w/ cost limit'),
2604 ])
2617 ])
2605 else:
2618 else:
2606 benches.extend([
2619 benches.extend([
2607 (dogets, b'gets'),
2620 (dogets, b'gets'),
2608 (doinserts, b'inserts'),
2621 (doinserts, b'inserts'),
2609 (dosets, b'sets'),
2622 (dosets, b'sets'),
2610 (domixed, b'mixed')
2623 (domixed, b'mixed')
2611 ])
2624 ])
2612
2625
2613 for fn, title in benches:
2626 for fn, title in benches:
2614 timer, fm = gettimer(ui, opts)
2627 timer, fm = gettimer(ui, opts)
2615 timer(fn, title=title)
2628 timer(fn, title=title)
2616 fm.end()
2629 fm.end()
2617
2630
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def write():
        # The payload is a constant byte string, so the timing isolates the
        # per-call dispatch/buffering overhead of ui.write() itself.
        for _ in range(100000):
            ui.write(b'Testing write performance\n')

    timer(write)
    fm.end()
2630
2643
def uisetup(ui):
    """Extension setup hook: guard cmdutil.openrevlog on old Mercurials."""
    has_openrevlog = util.safehasattr(cmdutil, b'openrevlog')
    has_debugrevlogopts = util.safehasattr(commands, b'debugrevlogopts')
    if not has_openrevlog or has_debugrevlogopts:
        return

    # for "historical portability":
    # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
    # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
    # openrevlog() should cause failure, because it has been
    # available since 3.5 (or 49c583ca48c4).
    def wrappedopenrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(b"This version doesn't support --dir option",
                              hint=b"use 3.5 or later")
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', wrappedopenrevlog)
2645
2658
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # One increment per unit so the progress machinery runs `total`
        # times; the context manager handles completion/cleanup.
        with ui.makeprogress(topic, total=total) as progress:
            for _ in pycompat.xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now