perf: explicitly pass title as a keyword argument in `perfdiffwd`...
Boris Feld
r40715:20d2fd60 default
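The change summarized above makes `perfdiffwd` pass `title` to the timer helper as an explicit keyword argument instead of positionally. The hunk shown below does not reach that function, so the following is only a rough, hypothetical sketch of that kind of call-site change (the `d()` benchmark body and the `title` bytes string are assumed from the usual perf.py pattern, not taken from this hunk):

    # before: title passed positionally
    # timer(d, title)
    # after: title passed explicitly as a keyword argument
    timer(d, title=title)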
@@ -1,2365 +1,2365 @@
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance'''
2 '''helper extension to measure performance'''
3
3
4 # "historical portability" policy of perf.py:
4 # "historical portability" policy of perf.py:
5 #
5 #
6 # We have to do:
6 # We have to do:
7 # - make perf.py "loadable" with as wide Mercurial version as possible
7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 # This doesn't mean that perf commands work correctly with that Mercurial.
8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 # - make historical perf command work correctly with as wide Mercurial
10 # - make historical perf command work correctly with as wide Mercurial
11 # version as possible
11 # version as possible
12 #
12 #
13 # We have to do, if possible with reasonable cost:
13 # We have to do, if possible with reasonable cost:
14 # - make recent perf command for historical feature work correctly
14 # - make recent perf command for historical feature work correctly
15 # with early Mercurial
15 # with early Mercurial
16 #
16 #
17 # We don't have to do:
17 # We don't have to do:
18 # - make perf command for recent feature work correctly with early
18 # - make perf command for recent feature work correctly with early
19 # Mercurial
19 # Mercurial
20
20
21 from __future__ import absolute_import
21 from __future__ import absolute_import
22 import contextlib
22 import contextlib
23 import functools
23 import functools
24 import gc
24 import gc
25 import os
25 import os
26 import random
26 import random
27 import shutil
27 import shutil
28 import struct
28 import struct
29 import sys
29 import sys
30 import tempfile
30 import tempfile
31 import threading
31 import threading
32 import time
32 import time
33 from mercurial import (
33 from mercurial import (
34 changegroup,
34 changegroup,
35 cmdutil,
35 cmdutil,
36 commands,
36 commands,
37 copies,
37 copies,
38 error,
38 error,
39 extensions,
39 extensions,
40 mdiff,
40 mdiff,
41 merge,
41 merge,
42 revlog,
42 revlog,
43 util,
43 util,
44 )
44 )
45
45
46 # for "historical portability":
46 # for "historical portability":
47 # try to import modules separately (in dict order), and ignore
47 # try to import modules separately (in dict order), and ignore
48 # failure, because these aren't available with early Mercurial
48 # failure, because these aren't available with early Mercurial
49 try:
49 try:
50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 except ImportError:
51 except ImportError:
52 pass
52 pass
53 try:
53 try:
54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 except ImportError:
55 except ImportError:
56 pass
56 pass
57 try:
57 try:
58 from mercurial import registrar # since 3.7 (or 37d50250b696)
58 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 dir(registrar) # forcibly load it
59 dir(registrar) # forcibly load it
60 except ImportError:
60 except ImportError:
61 registrar = None
61 registrar = None
62 try:
62 try:
63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 except ImportError:
64 except ImportError:
65 pass
65 pass
66 try:
66 try:
67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 except ImportError:
68 except ImportError:
69 pass
69 pass
70
70
71 def identity(a):
71 def identity(a):
72 return a
72 return a
73
73
74 try:
74 try:
75 from mercurial import pycompat
75 from mercurial import pycompat
76 getargspec = pycompat.getargspec # added to module after 4.5
76 getargspec = pycompat.getargspec # added to module after 4.5
77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
81 if pycompat.ispy3:
81 if pycompat.ispy3:
82 _maxint = sys.maxsize # per py3 docs for replacing maxint
82 _maxint = sys.maxsize # per py3 docs for replacing maxint
83 else:
83 else:
84 _maxint = sys.maxint
84 _maxint = sys.maxint
85 except (ImportError, AttributeError):
85 except (ImportError, AttributeError):
86 import inspect
86 import inspect
87 getargspec = inspect.getargspec
87 getargspec = inspect.getargspec
88 _byteskwargs = identity
88 _byteskwargs = identity
89 fsencode = identity # no py3 support
89 fsencode = identity # no py3 support
90 _maxint = sys.maxint # no py3 support
90 _maxint = sys.maxint # no py3 support
91 _sysstr = lambda x: x # no py3 support
91 _sysstr = lambda x: x # no py3 support
92 _xrange = xrange
92 _xrange = xrange
93
93
94 try:
94 try:
95 # 4.7+
95 # 4.7+
96 queue = pycompat.queue.Queue
96 queue = pycompat.queue.Queue
97 except (AttributeError, ImportError):
97 except (AttributeError, ImportError):
98 # <4.7.
98 # <4.7.
99 try:
99 try:
100 queue = pycompat.queue
100 queue = pycompat.queue
101 except (AttributeError, ImportError):
101 except (AttributeError, ImportError):
102 queue = util.queue
102 queue = util.queue
103
103
104 try:
104 try:
105 from mercurial import logcmdutil
105 from mercurial import logcmdutil
106 makelogtemplater = logcmdutil.maketemplater
106 makelogtemplater = logcmdutil.maketemplater
107 except (AttributeError, ImportError):
107 except (AttributeError, ImportError):
108 try:
108 try:
109 makelogtemplater = cmdutil.makelogtemplater
109 makelogtemplater = cmdutil.makelogtemplater
110 except (AttributeError, ImportError):
110 except (AttributeError, ImportError):
111 makelogtemplater = None
111 makelogtemplater = None
112
112
113 # for "historical portability":
113 # for "historical portability":
114 # define util.safehasattr forcibly, because util.safehasattr has been
114 # define util.safehasattr forcibly, because util.safehasattr has been
115 # available since 1.9.3 (or 94b200a11cf7)
115 # available since 1.9.3 (or 94b200a11cf7)
116 _undefined = object()
116 _undefined = object()
117 def safehasattr(thing, attr):
117 def safehasattr(thing, attr):
118 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
118 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
119 setattr(util, 'safehasattr', safehasattr)
119 setattr(util, 'safehasattr', safehasattr)
120
120
121 # for "historical portability":
121 # for "historical portability":
122 # define util.timer forcibly, because util.timer has been available
122 # define util.timer forcibly, because util.timer has been available
123 # since ae5d60bb70c9
123 # since ae5d60bb70c9
124 if safehasattr(time, 'perf_counter'):
124 if safehasattr(time, 'perf_counter'):
125 util.timer = time.perf_counter
125 util.timer = time.perf_counter
126 elif os.name == b'nt':
126 elif os.name == b'nt':
127 util.timer = time.clock
127 util.timer = time.clock
128 else:
128 else:
129 util.timer = time.time
129 util.timer = time.time
130
130
131 # for "historical portability":
131 # for "historical portability":
132 # use locally defined empty option list, if formatteropts isn't
132 # use locally defined empty option list, if formatteropts isn't
133 # available, because commands.formatteropts has been available since
133 # available, because commands.formatteropts has been available since
134 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
134 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
135 # available since 2.2 (or ae5f92e154d3)
135 # available since 2.2 (or ae5f92e154d3)
136 formatteropts = getattr(cmdutil, "formatteropts",
136 formatteropts = getattr(cmdutil, "formatteropts",
137 getattr(commands, "formatteropts", []))
137 getattr(commands, "formatteropts", []))
138
138
139 # for "historical portability":
139 # for "historical portability":
140 # use locally defined option list, if debugrevlogopts isn't available,
140 # use locally defined option list, if debugrevlogopts isn't available,
141 # because commands.debugrevlogopts has been available since 3.7 (or
141 # because commands.debugrevlogopts has been available since 3.7 (or
142 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
142 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
143 # since 1.9 (or a79fea6b3e77).
143 # since 1.9 (or a79fea6b3e77).
144 revlogopts = getattr(cmdutil, "debugrevlogopts",
144 revlogopts = getattr(cmdutil, "debugrevlogopts",
145 getattr(commands, "debugrevlogopts", [
145 getattr(commands, "debugrevlogopts", [
146 (b'c', b'changelog', False, (b'open changelog')),
146 (b'c', b'changelog', False, (b'open changelog')),
147 (b'm', b'manifest', False, (b'open manifest')),
147 (b'm', b'manifest', False, (b'open manifest')),
148 (b'', b'dir', False, (b'open directory manifest')),
148 (b'', b'dir', False, (b'open directory manifest')),
149 ]))
149 ]))
150
150
151 cmdtable = {}
151 cmdtable = {}
152
152
153 # for "historical portability":
153 # for "historical portability":
154 # define parsealiases locally, because cmdutil.parsealiases has been
154 # define parsealiases locally, because cmdutil.parsealiases has been
155 # available since 1.5 (or 6252852b4332)
155 # available since 1.5 (or 6252852b4332)
156 def parsealiases(cmd):
156 def parsealiases(cmd):
157 return cmd.split(b"|")
157 return cmd.split(b"|")
158
158
159 if safehasattr(registrar, 'command'):
159 if safehasattr(registrar, 'command'):
160 command = registrar.command(cmdtable)
160 command = registrar.command(cmdtable)
161 elif safehasattr(cmdutil, 'command'):
161 elif safehasattr(cmdutil, 'command'):
162 command = cmdutil.command(cmdtable)
162 command = cmdutil.command(cmdtable)
163 if b'norepo' not in getargspec(command).args:
163 if b'norepo' not in getargspec(command).args:
164 # for "historical portability":
164 # for "historical portability":
165 # wrap original cmdutil.command, because "norepo" option has
165 # wrap original cmdutil.command, because "norepo" option has
166 # been available since 3.1 (or 75a96326cecb)
166 # been available since 3.1 (or 75a96326cecb)
167 _command = command
167 _command = command
168 def command(name, options=(), synopsis=None, norepo=False):
168 def command(name, options=(), synopsis=None, norepo=False):
169 if norepo:
169 if norepo:
170 commands.norepo += b' %s' % b' '.join(parsealiases(name))
170 commands.norepo += b' %s' % b' '.join(parsealiases(name))
171 return _command(name, list(options), synopsis)
171 return _command(name, list(options), synopsis)
172 else:
172 else:
173 # for "historical portability":
173 # for "historical portability":
174 # define "@command" annotation locally, because cmdutil.command
174 # define "@command" annotation locally, because cmdutil.command
175 # has been available since 1.9 (or 2daa5179e73f)
175 # has been available since 1.9 (or 2daa5179e73f)
176 def command(name, options=(), synopsis=None, norepo=False):
176 def command(name, options=(), synopsis=None, norepo=False):
177 def decorator(func):
177 def decorator(func):
178 if synopsis:
178 if synopsis:
179 cmdtable[name] = func, list(options), synopsis
179 cmdtable[name] = func, list(options), synopsis
180 else:
180 else:
181 cmdtable[name] = func, list(options)
181 cmdtable[name] = func, list(options)
182 if norepo:
182 if norepo:
183 commands.norepo += b' %s' % b' '.join(parsealiases(name))
183 commands.norepo += b' %s' % b' '.join(parsealiases(name))
184 return func
184 return func
185 return decorator
185 return decorator
186
186
187 try:
187 try:
188 import mercurial.registrar
188 import mercurial.registrar
189 import mercurial.configitems
189 import mercurial.configitems
190 configtable = {}
190 configtable = {}
191 configitem = mercurial.registrar.configitem(configtable)
191 configitem = mercurial.registrar.configitem(configtable)
192 configitem(b'perf', b'presleep',
192 configitem(b'perf', b'presleep',
193 default=mercurial.configitems.dynamicdefault,
193 default=mercurial.configitems.dynamicdefault,
194 )
194 )
195 configitem(b'perf', b'stub',
195 configitem(b'perf', b'stub',
196 default=mercurial.configitems.dynamicdefault,
196 default=mercurial.configitems.dynamicdefault,
197 )
197 )
198 configitem(b'perf', b'parentscount',
198 configitem(b'perf', b'parentscount',
199 default=mercurial.configitems.dynamicdefault,
199 default=mercurial.configitems.dynamicdefault,
200 )
200 )
201 configitem(b'perf', b'all-timing',
201 configitem(b'perf', b'all-timing',
202 default=mercurial.configitems.dynamicdefault,
202 default=mercurial.configitems.dynamicdefault,
203 )
203 )
204 except (ImportError, AttributeError):
204 except (ImportError, AttributeError):
205 pass
205 pass
206
206
207 def getlen(ui):
207 def getlen(ui):
208 if ui.configbool(b"perf", b"stub", False):
208 if ui.configbool(b"perf", b"stub", False):
209 return lambda x: 1
209 return lambda x: 1
210 return len
210 return len
211
211
212 def gettimer(ui, opts=None):
212 def gettimer(ui, opts=None):
213 """return a timer function and formatter: (timer, formatter)
213 """return a timer function and formatter: (timer, formatter)
214
214
215 This function exists to gather the creation of formatter in a single
215 This function exists to gather the creation of formatter in a single
216 place instead of duplicating it in all performance commands."""
216 place instead of duplicating it in all performance commands."""
217
217
218 # enforce an idle period before execution to counteract power management
218 # enforce an idle period before execution to counteract power management
219 # experimental config: perf.presleep
219 # experimental config: perf.presleep
220 time.sleep(getint(ui, b"perf", b"presleep", 1))
220 time.sleep(getint(ui, b"perf", b"presleep", 1))
221
221
222 if opts is None:
222 if opts is None:
223 opts = {}
223 opts = {}
224 # redirect all to stderr unless buffer api is in use
224 # redirect all to stderr unless buffer api is in use
225 if not ui._buffers:
225 if not ui._buffers:
226 ui = ui.copy()
226 ui = ui.copy()
227 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
227 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
228 if uifout:
228 if uifout:
229 # for "historical portability":
229 # for "historical portability":
230 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
230 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
231 uifout.set(ui.ferr)
231 uifout.set(ui.ferr)
232
232
233 # get a formatter
233 # get a formatter
234 uiformatter = getattr(ui, 'formatter', None)
234 uiformatter = getattr(ui, 'formatter', None)
235 if uiformatter:
235 if uiformatter:
236 fm = uiformatter(b'perf', opts)
236 fm = uiformatter(b'perf', opts)
237 else:
237 else:
238 # for "historical portability":
238 # for "historical portability":
239 # define formatter locally, because ui.formatter has been
239 # define formatter locally, because ui.formatter has been
240 # available since 2.2 (or ae5f92e154d3)
240 # available since 2.2 (or ae5f92e154d3)
241 from mercurial import node
241 from mercurial import node
242 class defaultformatter(object):
242 class defaultformatter(object):
243 """Minimized composition of baseformatter and plainformatter
243 """Minimized composition of baseformatter and plainformatter
244 """
244 """
245 def __init__(self, ui, topic, opts):
245 def __init__(self, ui, topic, opts):
246 self._ui = ui
246 self._ui = ui
247 if ui.debugflag:
247 if ui.debugflag:
248 self.hexfunc = node.hex
248 self.hexfunc = node.hex
249 else:
249 else:
250 self.hexfunc = node.short
250 self.hexfunc = node.short
251 def __nonzero__(self):
251 def __nonzero__(self):
252 return False
252 return False
253 __bool__ = __nonzero__
253 __bool__ = __nonzero__
254 def startitem(self):
254 def startitem(self):
255 pass
255 pass
256 def data(self, **data):
256 def data(self, **data):
257 pass
257 pass
258 def write(self, fields, deftext, *fielddata, **opts):
258 def write(self, fields, deftext, *fielddata, **opts):
259 self._ui.write(deftext % fielddata, **opts)
259 self._ui.write(deftext % fielddata, **opts)
260 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
260 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
261 if cond:
261 if cond:
262 self._ui.write(deftext % fielddata, **opts)
262 self._ui.write(deftext % fielddata, **opts)
263 def plain(self, text, **opts):
263 def plain(self, text, **opts):
264 self._ui.write(text, **opts)
264 self._ui.write(text, **opts)
265 def end(self):
265 def end(self):
266 pass
266 pass
267 fm = defaultformatter(ui, b'perf', opts)
267 fm = defaultformatter(ui, b'perf', opts)
268
268
269 # stub function, runs code only once instead of in a loop
269 # stub function, runs code only once instead of in a loop
270 # experimental config: perf.stub
270 # experimental config: perf.stub
271 if ui.configbool(b"perf", b"stub", False):
271 if ui.configbool(b"perf", b"stub", False):
272 return functools.partial(stub_timer, fm), fm
272 return functools.partial(stub_timer, fm), fm
273
273
274 # experimental config: perf.all-timing
274 # experimental config: perf.all-timing
275 displayall = ui.configbool(b"perf", b"all-timing", False)
275 displayall = ui.configbool(b"perf", b"all-timing", False)
276 return functools.partial(_timer, fm, displayall=displayall), fm
276 return functools.partial(_timer, fm, displayall=displayall), fm
277
277
278 def stub_timer(fm, func, title=None):
278 def stub_timer(fm, func, title=None):
279 func()
279 func()
280
280
281 @contextlib.contextmanager
281 @contextlib.contextmanager
282 def timeone():
282 def timeone():
283 r = []
283 r = []
284 ostart = os.times()
284 ostart = os.times()
285 cstart = util.timer()
285 cstart = util.timer()
286 yield r
286 yield r
287 cstop = util.timer()
287 cstop = util.timer()
288 ostop = os.times()
288 ostop = os.times()
289 a, b = ostart, ostop
289 a, b = ostart, ostop
290 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
290 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
291
291
292 def _timer(fm, func, title=None, displayall=False):
292 def _timer(fm, func, title=None, displayall=False):
293 gc.collect()
293 gc.collect()
294 results = []
294 results = []
295 begin = util.timer()
295 begin = util.timer()
296 count = 0
296 count = 0
297 while True:
297 while True:
298 with timeone() as item:
298 with timeone() as item:
299 r = func()
299 r = func()
300 count += 1
300 count += 1
301 results.append(item[0])
301 results.append(item[0])
302 cstop = util.timer()
302 cstop = util.timer()
303 if cstop - begin > 3 and count >= 100:
303 if cstop - begin > 3 and count >= 100:
304 break
304 break
305 if cstop - begin > 10 and count >= 3:
305 if cstop - begin > 10 and count >= 3:
306 break
306 break
307
307
308 formatone(fm, results, title=title, result=r,
308 formatone(fm, results, title=title, result=r,
309 displayall=displayall)
309 displayall=displayall)
310
310
311 def formatone(fm, timings, title=None, result=None, displayall=False):
311 def formatone(fm, timings, title=None, result=None, displayall=False):
312
312
313 count = len(timings)
313 count = len(timings)
314
314
315 fm.startitem()
315 fm.startitem()
316
316
317 if title:
317 if title:
318 fm.write(b'title', b'! %s\n', title)
318 fm.write(b'title', b'! %s\n', title)
319 if result:
319 if result:
320 fm.write(b'result', b'! result: %s\n', result)
320 fm.write(b'result', b'! result: %s\n', result)
321 def display(role, entry):
321 def display(role, entry):
322 prefix = b''
322 prefix = b''
323 if role != b'best':
323 if role != b'best':
324 prefix = b'%s.' % role
324 prefix = b'%s.' % role
325 fm.plain(b'!')
325 fm.plain(b'!')
326 fm.write(prefix + b'wall', b' wall %f', entry[0])
326 fm.write(prefix + b'wall', b' wall %f', entry[0])
327 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
327 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
328 fm.write(prefix + b'user', b' user %f', entry[1])
328 fm.write(prefix + b'user', b' user %f', entry[1])
329 fm.write(prefix + b'sys', b' sys %f', entry[2])
329 fm.write(prefix + b'sys', b' sys %f', entry[2])
330 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
330 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
331 fm.plain(b'\n')
331 fm.plain(b'\n')
332 timings.sort()
332 timings.sort()
333 min_val = timings[0]
333 min_val = timings[0]
334 display(b'best', min_val)
334 display(b'best', min_val)
335 if displayall:
335 if displayall:
336 max_val = timings[-1]
336 max_val = timings[-1]
337 display(b'max', max_val)
337 display(b'max', max_val)
338 avg = tuple([sum(x) / count for x in zip(*timings)])
338 avg = tuple([sum(x) / count for x in zip(*timings)])
339 display(b'avg', avg)
339 display(b'avg', avg)
340 median = timings[len(timings) // 2]
340 median = timings[len(timings) // 2]
341 display(b'median', median)
341 display(b'median', median)
342
342
343 # utilities for historical portability
343 # utilities for historical portability
344
344
345 def getint(ui, section, name, default):
345 def getint(ui, section, name, default):
346 # for "historical portability":
346 # for "historical portability":
347 # ui.configint has been available since 1.9 (or fa2b596db182)
347 # ui.configint has been available since 1.9 (or fa2b596db182)
348 v = ui.config(section, name, None)
348 v = ui.config(section, name, None)
349 if v is None:
349 if v is None:
350 return default
350 return default
351 try:
351 try:
352 return int(v)
352 return int(v)
353 except ValueError:
353 except ValueError:
354 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
354 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
355 % (section, name, v))
355 % (section, name, v))
356
356
357 def safeattrsetter(obj, name, ignoremissing=False):
357 def safeattrsetter(obj, name, ignoremissing=False):
358 """Ensure that 'obj' has 'name' attribute before subsequent setattr
358 """Ensure that 'obj' has 'name' attribute before subsequent setattr
359
359
360 This function is aborted, if 'obj' doesn't have 'name' attribute
360 This function is aborted, if 'obj' doesn't have 'name' attribute
361 at runtime. This avoids overlooking removal of an attribute, which
361 at runtime. This avoids overlooking removal of an attribute, which
362 breaks assumption of performance measurement, in the future.
362 breaks assumption of performance measurement, in the future.
363
363
364 This function returns the object to (1) assign a new value, and
364 This function returns the object to (1) assign a new value, and
365 (2) restore an original value to the attribute.
365 (2) restore an original value to the attribute.
366
366
367 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
367 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
368 abortion, and this function returns None. This is useful to
368 abortion, and this function returns None. This is useful to
369 examine an attribute, which isn't ensured in all Mercurial
369 examine an attribute, which isn't ensured in all Mercurial
370 versions.
370 versions.
371 """
371 """
372 if not util.safehasattr(obj, name):
372 if not util.safehasattr(obj, name):
373 if ignoremissing:
373 if ignoremissing:
374 return None
374 return None
375 raise error.Abort((b"missing attribute %s of %s might break assumption"
375 raise error.Abort((b"missing attribute %s of %s might break assumption"
376 b" of performance measurement") % (name, obj))
376 b" of performance measurement") % (name, obj))
377
377
378 origvalue = getattr(obj, _sysstr(name))
378 origvalue = getattr(obj, _sysstr(name))
379 class attrutil(object):
379 class attrutil(object):
380 def set(self, newvalue):
380 def set(self, newvalue):
381 setattr(obj, _sysstr(name), newvalue)
381 setattr(obj, _sysstr(name), newvalue)
382 def restore(self):
382 def restore(self):
383 setattr(obj, _sysstr(name), origvalue)
383 setattr(obj, _sysstr(name), origvalue)
384
384
385 return attrutil()
385 return attrutil()
386
386
387 # utilities to examine each internal API changes
387 # utilities to examine each internal API changes
388
388
389 def getbranchmapsubsettable():
389 def getbranchmapsubsettable():
390 # for "historical portability":
390 # for "historical portability":
391 # subsettable is defined in:
391 # subsettable is defined in:
392 # - branchmap since 2.9 (or 175c6fd8cacc)
392 # - branchmap since 2.9 (or 175c6fd8cacc)
393 # - repoview since 2.5 (or 59a9f18d4587)
393 # - repoview since 2.5 (or 59a9f18d4587)
394 for mod in (branchmap, repoview):
394 for mod in (branchmap, repoview):
395 subsettable = getattr(mod, 'subsettable', None)
395 subsettable = getattr(mod, 'subsettable', None)
396 if subsettable:
396 if subsettable:
397 return subsettable
397 return subsettable
398
398
399 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
399 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
400 # branchmap and repoview modules exist, but subsettable attribute
400 # branchmap and repoview modules exist, but subsettable attribute
401 # doesn't)
401 # doesn't)
402 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
402 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
403 hint=b"use 2.5 or later")
403 hint=b"use 2.5 or later")
404
404
405 def getsvfs(repo):
405 def getsvfs(repo):
406 """Return appropriate object to access files under .hg/store
406 """Return appropriate object to access files under .hg/store
407 """
407 """
408 # for "historical portability":
408 # for "historical portability":
409 # repo.svfs has been available since 2.3 (or 7034365089bf)
409 # repo.svfs has been available since 2.3 (or 7034365089bf)
410 svfs = getattr(repo, 'svfs', None)
410 svfs = getattr(repo, 'svfs', None)
411 if svfs:
411 if svfs:
412 return svfs
412 return svfs
413 else:
413 else:
414 return getattr(repo, 'sopener')
414 return getattr(repo, 'sopener')
415
415
416 def getvfs(repo):
416 def getvfs(repo):
417 """Return appropriate object to access files under .hg
417 """Return appropriate object to access files under .hg
418 """
418 """
419 # for "historical portability":
419 # for "historical portability":
420 # repo.vfs has been available since 2.3 (or 7034365089bf)
420 # repo.vfs has been available since 2.3 (or 7034365089bf)
421 vfs = getattr(repo, 'vfs', None)
421 vfs = getattr(repo, 'vfs', None)
422 if vfs:
422 if vfs:
423 return vfs
423 return vfs
424 else:
424 else:
425 return getattr(repo, 'opener')
425 return getattr(repo, 'opener')
426
426
427 def repocleartagscachefunc(repo):
427 def repocleartagscachefunc(repo):
428 """Return the function to clear tags cache according to repo internal API
428 """Return the function to clear tags cache according to repo internal API
429 """
429 """
430 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
430 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
431 # in this case, setattr(repo, '_tagscache', None) or so isn't
431 # in this case, setattr(repo, '_tagscache', None) or so isn't
432 # correct way to clear tags cache, because existing code paths
432 # correct way to clear tags cache, because existing code paths
433 # expect _tagscache to be a structured object.
433 # expect _tagscache to be a structured object.
434 def clearcache():
434 def clearcache():
435 # _tagscache has been filteredpropertycache since 2.5 (or
435 # _tagscache has been filteredpropertycache since 2.5 (or
436 # 98c867ac1330), and delattr() can't work in such case
436 # 98c867ac1330), and delattr() can't work in such case
437 if b'_tagscache' in vars(repo):
437 if b'_tagscache' in vars(repo):
438 del repo.__dict__[b'_tagscache']
438 del repo.__dict__[b'_tagscache']
439 return clearcache
439 return clearcache
440
440
441 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
441 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
442 if repotags: # since 1.4 (or 5614a628d173)
442 if repotags: # since 1.4 (or 5614a628d173)
443 return lambda : repotags.set(None)
443 return lambda : repotags.set(None)
444
444
445 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
445 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
446 if repotagscache: # since 0.6 (or d7df759d0e97)
446 if repotagscache: # since 0.6 (or d7df759d0e97)
447 return lambda : repotagscache.set(None)
447 return lambda : repotagscache.set(None)
448
448
449 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
449 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
450 # this point, but it isn't so problematic, because:
450 # this point, but it isn't so problematic, because:
451 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
451 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
452 # in perftags() causes failure soon
452 # in perftags() causes failure soon
453 # - perf.py itself has been available since 1.1 (or eb240755386d)
453 # - perf.py itself has been available since 1.1 (or eb240755386d)
454 raise error.Abort((b"tags API of this hg command is unknown"))
454 raise error.Abort((b"tags API of this hg command is unknown"))
455
455
456 # utilities to clear cache
456 # utilities to clear cache
457
457
458 def clearfilecache(repo, attrname):
458 def clearfilecache(repo, attrname):
459 unfi = repo.unfiltered()
459 unfi = repo.unfiltered()
460 if attrname in vars(unfi):
460 if attrname in vars(unfi):
461 delattr(unfi, attrname)
461 delattr(unfi, attrname)
462 unfi._filecache.pop(attrname, None)
462 unfi._filecache.pop(attrname, None)
463
463
464 # perf commands
464 # perf commands
465
465
466 @command(b'perfwalk', formatteropts)
466 @command(b'perfwalk', formatteropts)
467 def perfwalk(ui, repo, *pats, **opts):
467 def perfwalk(ui, repo, *pats, **opts):
468 opts = _byteskwargs(opts)
468 opts = _byteskwargs(opts)
469 timer, fm = gettimer(ui, opts)
469 timer, fm = gettimer(ui, opts)
470 m = scmutil.match(repo[None], pats, {})
470 m = scmutil.match(repo[None], pats, {})
471 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
471 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
472 ignored=False))))
472 ignored=False))))
473 fm.end()
473 fm.end()
474
474
475 @command(b'perfannotate', formatteropts)
475 @command(b'perfannotate', formatteropts)
476 def perfannotate(ui, repo, f, **opts):
476 def perfannotate(ui, repo, f, **opts):
477 opts = _byteskwargs(opts)
477 opts = _byteskwargs(opts)
478 timer, fm = gettimer(ui, opts)
478 timer, fm = gettimer(ui, opts)
479 fc = repo[b'.'][f]
479 fc = repo[b'.'][f]
480 timer(lambda: len(fc.annotate(True)))
480 timer(lambda: len(fc.annotate(True)))
481 fm.end()
481 fm.end()
482
482
483 @command(b'perfstatus',
483 @command(b'perfstatus',
484 [(b'u', b'unknown', False,
484 [(b'u', b'unknown', False,
485 b'ask status to look for unknown files')] + formatteropts)
485 b'ask status to look for unknown files')] + formatteropts)
486 def perfstatus(ui, repo, **opts):
486 def perfstatus(ui, repo, **opts):
487 opts = _byteskwargs(opts)
487 opts = _byteskwargs(opts)
488 #m = match.always(repo.root, repo.getcwd())
488 #m = match.always(repo.root, repo.getcwd())
489 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
489 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
490 # False))))
490 # False))))
491 timer, fm = gettimer(ui, opts)
491 timer, fm = gettimer(ui, opts)
492 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
492 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
493 fm.end()
493 fm.end()
494
494
495 @command(b'perfaddremove', formatteropts)
495 @command(b'perfaddremove', formatteropts)
496 def perfaddremove(ui, repo, **opts):
496 def perfaddremove(ui, repo, **opts):
497 opts = _byteskwargs(opts)
497 opts = _byteskwargs(opts)
498 timer, fm = gettimer(ui, opts)
498 timer, fm = gettimer(ui, opts)
499 try:
499 try:
500 oldquiet = repo.ui.quiet
500 oldquiet = repo.ui.quiet
501 repo.ui.quiet = True
501 repo.ui.quiet = True
502 matcher = scmutil.match(repo[None])
502 matcher = scmutil.match(repo[None])
503 opts[b'dry_run'] = True
503 opts[b'dry_run'] = True
504 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
504 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
505 finally:
505 finally:
506 repo.ui.quiet = oldquiet
506 repo.ui.quiet = oldquiet
507 fm.end()
507 fm.end()
508
508
509 def clearcaches(cl):
509 def clearcaches(cl):
510 # behave somewhat consistently across internal API changes
510 # behave somewhat consistently across internal API changes
511 if util.safehasattr(cl, b'clearcaches'):
511 if util.safehasattr(cl, b'clearcaches'):
512 cl.clearcaches()
512 cl.clearcaches()
513 elif util.safehasattr(cl, b'_nodecache'):
513 elif util.safehasattr(cl, b'_nodecache'):
514 from mercurial.node import nullid, nullrev
514 from mercurial.node import nullid, nullrev
515 cl._nodecache = {nullid: nullrev}
515 cl._nodecache = {nullid: nullrev}
516 cl._nodepos = None
516 cl._nodepos = None
517
517
518 @command(b'perfheads', formatteropts)
518 @command(b'perfheads', formatteropts)
519 def perfheads(ui, repo, **opts):
519 def perfheads(ui, repo, **opts):
520 opts = _byteskwargs(opts)
520 opts = _byteskwargs(opts)
521 timer, fm = gettimer(ui, opts)
521 timer, fm = gettimer(ui, opts)
522 cl = repo.changelog
522 cl = repo.changelog
523 def d():
523 def d():
524 len(cl.headrevs())
524 len(cl.headrevs())
525 clearcaches(cl)
525 clearcaches(cl)
526 timer(d)
526 timer(d)
527 fm.end()
527 fm.end()
528
528
529 @command(b'perftags', formatteropts)
529 @command(b'perftags', formatteropts)
530 def perftags(ui, repo, **opts):
530 def perftags(ui, repo, **opts):
531 import mercurial.changelog
531 import mercurial.changelog
532 import mercurial.manifest
532 import mercurial.manifest
533
533
534 opts = _byteskwargs(opts)
534 opts = _byteskwargs(opts)
535 timer, fm = gettimer(ui, opts)
535 timer, fm = gettimer(ui, opts)
536 svfs = getsvfs(repo)
536 svfs = getsvfs(repo)
537 repocleartagscache = repocleartagscachefunc(repo)
537 repocleartagscache = repocleartagscachefunc(repo)
538 def t():
538 def t():
539 repo.changelog = mercurial.changelog.changelog(svfs)
539 repo.changelog = mercurial.changelog.changelog(svfs)
540 rootmanifest = mercurial.manifest.manifestrevlog(svfs)
540 rootmanifest = mercurial.manifest.manifestrevlog(svfs)
541 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
541 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
542 rootmanifest)
542 rootmanifest)
543 repocleartagscache()
543 repocleartagscache()
544 return len(repo.tags())
544 return len(repo.tags())
545 timer(t)
545 timer(t)
546 fm.end()
546 fm.end()
547
547
548 @command(b'perfancestors', formatteropts)
548 @command(b'perfancestors', formatteropts)
549 def perfancestors(ui, repo, **opts):
549 def perfancestors(ui, repo, **opts):
550 opts = _byteskwargs(opts)
550 opts = _byteskwargs(opts)
551 timer, fm = gettimer(ui, opts)
551 timer, fm = gettimer(ui, opts)
552 heads = repo.changelog.headrevs()
552 heads = repo.changelog.headrevs()
553 def d():
553 def d():
554 for a in repo.changelog.ancestors(heads):
554 for a in repo.changelog.ancestors(heads):
555 pass
555 pass
556 timer(d)
556 timer(d)
557 fm.end()
557 fm.end()
558
558
559 @command(b'perfancestorset', formatteropts)
559 @command(b'perfancestorset', formatteropts)
560 def perfancestorset(ui, repo, revset, **opts):
560 def perfancestorset(ui, repo, revset, **opts):
561 opts = _byteskwargs(opts)
561 opts = _byteskwargs(opts)
562 timer, fm = gettimer(ui, opts)
562 timer, fm = gettimer(ui, opts)
563 revs = repo.revs(revset)
563 revs = repo.revs(revset)
564 heads = repo.changelog.headrevs()
564 heads = repo.changelog.headrevs()
565 def d():
565 def d():
566 s = repo.changelog.ancestors(heads)
566 s = repo.changelog.ancestors(heads)
567 for rev in revs:
567 for rev in revs:
568 rev in s
568 rev in s
569 timer(d)
569 timer(d)
570 fm.end()
570 fm.end()
571
571
572 @command(b'perfbookmarks', formatteropts)
572 @command(b'perfbookmarks', formatteropts)
573 def perfbookmarks(ui, repo, **opts):
573 def perfbookmarks(ui, repo, **opts):
574 """benchmark parsing bookmarks from disk to memory"""
574 """benchmark parsing bookmarks from disk to memory"""
575 opts = _byteskwargs(opts)
575 opts = _byteskwargs(opts)
576 timer, fm = gettimer(ui, opts)
576 timer, fm = gettimer(ui, opts)
577 def d():
577 def d():
578 clearfilecache(repo, b'_bookmarks')
578 clearfilecache(repo, b'_bookmarks')
579 repo._bookmarks
579 repo._bookmarks
580 timer(d)
580 timer(d)
581 fm.end()
581 fm.end()
582
582
583 @command(b'perfbundleread', formatteropts, b'BUNDLE')
583 @command(b'perfbundleread', formatteropts, b'BUNDLE')
584 def perfbundleread(ui, repo, bundlepath, **opts):
584 def perfbundleread(ui, repo, bundlepath, **opts):
585 """Benchmark reading of bundle files.
585 """Benchmark reading of bundle files.
586
586
587 This command is meant to isolate the I/O part of bundle reading as
587 This command is meant to isolate the I/O part of bundle reading as
588 much as possible.
588 much as possible.
589 """
589 """
590 from mercurial import (
590 from mercurial import (
591 bundle2,
591 bundle2,
592 exchange,
592 exchange,
593 streamclone,
593 streamclone,
594 )
594 )
595
595
596 opts = _byteskwargs(opts)
596 opts = _byteskwargs(opts)
597
597
598 def makebench(fn):
598 def makebench(fn):
599 def run():
599 def run():
600 with open(bundlepath, b'rb') as fh:
600 with open(bundlepath, b'rb') as fh:
601 bundle = exchange.readbundle(ui, fh, bundlepath)
601 bundle = exchange.readbundle(ui, fh, bundlepath)
602 fn(bundle)
602 fn(bundle)
603
603
604 return run
604 return run
605
605
606 def makereadnbytes(size):
606 def makereadnbytes(size):
607 def run():
607 def run():
608 with open(bundlepath, b'rb') as fh:
608 with open(bundlepath, b'rb') as fh:
609 bundle = exchange.readbundle(ui, fh, bundlepath)
609 bundle = exchange.readbundle(ui, fh, bundlepath)
610 while bundle.read(size):
610 while bundle.read(size):
611 pass
611 pass
612
612
613 return run
613 return run
614
614
615 def makestdioread(size):
615 def makestdioread(size):
616 def run():
616 def run():
617 with open(bundlepath, b'rb') as fh:
617 with open(bundlepath, b'rb') as fh:
618 while fh.read(size):
618 while fh.read(size):
619 pass
619 pass
620
620
621 return run
621 return run
622
622
623 # bundle1
623 # bundle1
624
624
625 def deltaiter(bundle):
625 def deltaiter(bundle):
626 for delta in bundle.deltaiter():
626 for delta in bundle.deltaiter():
627 pass
627 pass
628
628
629 def iterchunks(bundle):
629 def iterchunks(bundle):
630 for chunk in bundle.getchunks():
630 for chunk in bundle.getchunks():
631 pass
631 pass
632
632
633 # bundle2
633 # bundle2
634
634
635 def forwardchunks(bundle):
635 def forwardchunks(bundle):
636 for chunk in bundle._forwardchunks():
636 for chunk in bundle._forwardchunks():
637 pass
637 pass
638
638
639 def iterparts(bundle):
639 def iterparts(bundle):
640 for part in bundle.iterparts():
640 for part in bundle.iterparts():
641 pass
641 pass
642
642
643 def iterpartsseekable(bundle):
643 def iterpartsseekable(bundle):
644 for part in bundle.iterparts(seekable=True):
644 for part in bundle.iterparts(seekable=True):
645 pass
645 pass
646
646
647 def seek(bundle):
647 def seek(bundle):
648 for part in bundle.iterparts(seekable=True):
648 for part in bundle.iterparts(seekable=True):
649 part.seek(0, os.SEEK_END)
649 part.seek(0, os.SEEK_END)
650
650
651 def makepartreadnbytes(size):
651 def makepartreadnbytes(size):
652 def run():
652 def run():
653 with open(bundlepath, b'rb') as fh:
653 with open(bundlepath, b'rb') as fh:
654 bundle = exchange.readbundle(ui, fh, bundlepath)
654 bundle = exchange.readbundle(ui, fh, bundlepath)
655 for part in bundle.iterparts():
655 for part in bundle.iterparts():
656 while part.read(size):
656 while part.read(size):
657 pass
657 pass
658
658
659 return run
659 return run
660
660
661 benches = [
661 benches = [
662 (makestdioread(8192), b'read(8k)'),
662 (makestdioread(8192), b'read(8k)'),
663 (makestdioread(16384), b'read(16k)'),
663 (makestdioread(16384), b'read(16k)'),
664 (makestdioread(32768), b'read(32k)'),
664 (makestdioread(32768), b'read(32k)'),
665 (makestdioread(131072), b'read(128k)'),
665 (makestdioread(131072), b'read(128k)'),
666 ]
666 ]
667
667
668 with open(bundlepath, b'rb') as fh:
668 with open(bundlepath, b'rb') as fh:
669 bundle = exchange.readbundle(ui, fh, bundlepath)
669 bundle = exchange.readbundle(ui, fh, bundlepath)
670
670
671 if isinstance(bundle, changegroup.cg1unpacker):
671 if isinstance(bundle, changegroup.cg1unpacker):
672 benches.extend([
672 benches.extend([
673 (makebench(deltaiter), b'cg1 deltaiter()'),
673 (makebench(deltaiter), b'cg1 deltaiter()'),
674 (makebench(iterchunks), b'cg1 getchunks()'),
674 (makebench(iterchunks), b'cg1 getchunks()'),
675 (makereadnbytes(8192), b'cg1 read(8k)'),
675 (makereadnbytes(8192), b'cg1 read(8k)'),
676 (makereadnbytes(16384), b'cg1 read(16k)'),
676 (makereadnbytes(16384), b'cg1 read(16k)'),
677 (makereadnbytes(32768), b'cg1 read(32k)'),
677 (makereadnbytes(32768), b'cg1 read(32k)'),
678 (makereadnbytes(131072), b'cg1 read(128k)'),
678 (makereadnbytes(131072), b'cg1 read(128k)'),
679 ])
679 ])
680 elif isinstance(bundle, bundle2.unbundle20):
680 elif isinstance(bundle, bundle2.unbundle20):
681 benches.extend([
681 benches.extend([
682 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
682 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
683 (makebench(iterparts), b'bundle2 iterparts()'),
683 (makebench(iterparts), b'bundle2 iterparts()'),
684 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
684 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
685 (makebench(seek), b'bundle2 part seek()'),
685 (makebench(seek), b'bundle2 part seek()'),
686 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
686 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
687 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
687 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
688 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
688 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
689 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
689 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
690 ])
690 ])
691 elif isinstance(bundle, streamclone.streamcloneapplier):
691 elif isinstance(bundle, streamclone.streamcloneapplier):
692 raise error.Abort(b'stream clone bundles not supported')
692 raise error.Abort(b'stream clone bundles not supported')
693 else:
693 else:
694 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
694 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
695
695
696 for fn, title in benches:
696 for fn, title in benches:
697 timer, fm = gettimer(ui, opts)
697 timer, fm = gettimer(ui, opts)
698 timer(fn, title=title)
698 timer(fn, title=title)
699 fm.end()
699 fm.end()
700
700
701 @command(b'perfchangegroupchangelog', formatteropts +
701 @command(b'perfchangegroupchangelog', formatteropts +
702 [(b'', b'version', b'02', b'changegroup version'),
702 [(b'', b'version', b'02', b'changegroup version'),
703 (b'r', b'rev', b'', b'revisions to add to changegroup')])
703 (b'r', b'rev', b'', b'revisions to add to changegroup')])
704 def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
704 def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
705 """Benchmark producing a changelog group for a changegroup.
705 """Benchmark producing a changelog group for a changegroup.
706
706
707 This measures the time spent processing the changelog during a
707 This measures the time spent processing the changelog during a
708 bundle operation. This occurs during `hg bundle` and on a server
708 bundle operation. This occurs during `hg bundle` and on a server
709 processing a `getbundle` wire protocol request (handles clones
709 processing a `getbundle` wire protocol request (handles clones
710 and pull requests).
710 and pull requests).
711
711
712 By default, all revisions are added to the changegroup.
712 By default, all revisions are added to the changegroup.
713 """
713 """
714 opts = _byteskwargs(opts)
714 opts = _byteskwargs(opts)
715 cl = repo.changelog
715 cl = repo.changelog
716 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
716 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
717 bundler = changegroup.getbundler(version, repo)
717 bundler = changegroup.getbundler(version, repo)
718
718
719 def d():
719 def d():
720 state, chunks = bundler._generatechangelog(cl, nodes)
720 state, chunks = bundler._generatechangelog(cl, nodes)
721 for chunk in chunks:
721 for chunk in chunks:
722 pass
722 pass
723
723
724 timer, fm = gettimer(ui, opts)
724 timer, fm = gettimer(ui, opts)
725
725
726 # Terminal printing can interfere with timing. So disable it.
726 # Terminal printing can interfere with timing. So disable it.
727 with ui.configoverride({(b'progress', b'disable'): True}):
727 with ui.configoverride({(b'progress', b'disable'): True}):
728 timer(d)
728 timer(d)
729
729
730 fm.end()
730 fm.end()
731
731
732 @command(b'perfdirs', formatteropts)
732 @command(b'perfdirs', formatteropts)
733 def perfdirs(ui, repo, **opts):
733 def perfdirs(ui, repo, **opts):
734 opts = _byteskwargs(opts)
734 opts = _byteskwargs(opts)
735 timer, fm = gettimer(ui, opts)
735 timer, fm = gettimer(ui, opts)
736 dirstate = repo.dirstate
736 dirstate = repo.dirstate
737 b'a' in dirstate
737 b'a' in dirstate
738 def d():
738 def d():
739 dirstate.hasdir(b'a')
739 dirstate.hasdir(b'a')
740 del dirstate._map._dirs
740 del dirstate._map._dirs
741 timer(d)
741 timer(d)
742 fm.end()
742 fm.end()
743
743
744 @command(b'perfdirstate', formatteropts)
744 @command(b'perfdirstate', formatteropts)
745 def perfdirstate(ui, repo, **opts):
745 def perfdirstate(ui, repo, **opts):
746 opts = _byteskwargs(opts)
746 opts = _byteskwargs(opts)
747 timer, fm = gettimer(ui, opts)
747 timer, fm = gettimer(ui, opts)
748 b"a" in repo.dirstate
748 b"a" in repo.dirstate
749 def d():
749 def d():
750 repo.dirstate.invalidate()
750 repo.dirstate.invalidate()
751 b"a" in repo.dirstate
751 b"a" in repo.dirstate
752 timer(d)
752 timer(d)
753 fm.end()
753 fm.end()
754
754
755 @command(b'perfdirstatedirs', formatteropts)
755 @command(b'perfdirstatedirs', formatteropts)
756 def perfdirstatedirs(ui, repo, **opts):
756 def perfdirstatedirs(ui, repo, **opts):
757 opts = _byteskwargs(opts)
757 opts = _byteskwargs(opts)
758 timer, fm = gettimer(ui, opts)
758 timer, fm = gettimer(ui, opts)
759 b"a" in repo.dirstate
759 b"a" in repo.dirstate
760 def d():
760 def d():
761 repo.dirstate.hasdir(b"a")
761 repo.dirstate.hasdir(b"a")
762 del repo.dirstate._map._dirs
762 del repo.dirstate._map._dirs
763 timer(d)
763 timer(d)
764 fm.end()
764 fm.end()
765
765
766 @command(b'perfdirstatefoldmap', formatteropts)
766 @command(b'perfdirstatefoldmap', formatteropts)
767 def perfdirstatefoldmap(ui, repo, **opts):
767 def perfdirstatefoldmap(ui, repo, **opts):
768 opts = _byteskwargs(opts)
768 opts = _byteskwargs(opts)
769 timer, fm = gettimer(ui, opts)
769 timer, fm = gettimer(ui, opts)
770 dirstate = repo.dirstate
770 dirstate = repo.dirstate
771 b'a' in dirstate
771 b'a' in dirstate
772 def d():
772 def d():
773 dirstate._map.filefoldmap.get(b'a')
773 dirstate._map.filefoldmap.get(b'a')
774 del dirstate._map.filefoldmap
774 del dirstate._map.filefoldmap
775 timer(d)
775 timer(d)
776 fm.end()
776 fm.end()
777
777
778 @command(b'perfdirfoldmap', formatteropts)
778 @command(b'perfdirfoldmap', formatteropts)
779 def perfdirfoldmap(ui, repo, **opts):
779 def perfdirfoldmap(ui, repo, **opts):
780 opts = _byteskwargs(opts)
780 opts = _byteskwargs(opts)
781 timer, fm = gettimer(ui, opts)
781 timer, fm = gettimer(ui, opts)
782 dirstate = repo.dirstate
782 dirstate = repo.dirstate
783 b'a' in dirstate
783 b'a' in dirstate
784 def d():
784 def d():
785 dirstate._map.dirfoldmap.get(b'a')
785 dirstate._map.dirfoldmap.get(b'a')
786 del dirstate._map.dirfoldmap
786 del dirstate._map.dirfoldmap
787 del dirstate._map._dirs
787 del dirstate._map._dirs
788 timer(d)
788 timer(d)
789 fm.end()
789 fm.end()
790
790
791 @command(b'perfdirstatewrite', formatteropts)
791 @command(b'perfdirstatewrite', formatteropts)
792 def perfdirstatewrite(ui, repo, **opts):
792 def perfdirstatewrite(ui, repo, **opts):
793 opts = _byteskwargs(opts)
793 opts = _byteskwargs(opts)
794 timer, fm = gettimer(ui, opts)
794 timer, fm = gettimer(ui, opts)
795 ds = repo.dirstate
795 ds = repo.dirstate
796 b"a" in ds
796 b"a" in ds
797 def d():
797 def d():
798 ds._dirty = True
798 ds._dirty = True
799 ds.write(repo.currenttransaction())
799 ds.write(repo.currenttransaction())
800 timer(d)
800 timer(d)
801 fm.end()
801 fm.end()
802
802
803 @command(b'perfmergecalculate',
803 @command(b'perfmergecalculate',
804 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
804 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
805 def perfmergecalculate(ui, repo, rev, **opts):
805 def perfmergecalculate(ui, repo, rev, **opts):
806 opts = _byteskwargs(opts)
806 opts = _byteskwargs(opts)
807 timer, fm = gettimer(ui, opts)
807 timer, fm = gettimer(ui, opts)
808 wctx = repo[None]
808 wctx = repo[None]
809 rctx = scmutil.revsingle(repo, rev, rev)
809 rctx = scmutil.revsingle(repo, rev, rev)
810 ancestor = wctx.ancestor(rctx)
810 ancestor = wctx.ancestor(rctx)
811 # we don't want working dir files to be stat'd in the benchmark, so prime
811 # we don't want working dir files to be stat'd in the benchmark, so prime
812 # that cache
812 # that cache
813 wctx.dirty()
813 wctx.dirty()
814 def d():
814 def d():
815 # acceptremote is True because we don't want prompts in the middle of
815 # acceptremote is True because we don't want prompts in the middle of
816 # our benchmark
816 # our benchmark
817 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
817 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
818 acceptremote=True, followcopies=True)
818 acceptremote=True, followcopies=True)
819 timer(d)
819 timer(d)
820 fm.end()
820 fm.end()
821
821
822 @command(b'perfpathcopies', [], b"REV REV")
822 @command(b'perfpathcopies', [], b"REV REV")
823 def perfpathcopies(ui, repo, rev1, rev2, **opts):
823 def perfpathcopies(ui, repo, rev1, rev2, **opts):
824 opts = _byteskwargs(opts)
824 opts = _byteskwargs(opts)
825 timer, fm = gettimer(ui, opts)
825 timer, fm = gettimer(ui, opts)
826 ctx1 = scmutil.revsingle(repo, rev1, rev1)
826 ctx1 = scmutil.revsingle(repo, rev1, rev1)
827 ctx2 = scmutil.revsingle(repo, rev2, rev2)
827 ctx2 = scmutil.revsingle(repo, rev2, rev2)
828 def d():
828 def d():
829 copies.pathcopies(ctx1, ctx2)
829 copies.pathcopies(ctx1, ctx2)
830 timer(d)
830 timer(d)
831 fm.end()
831 fm.end()
832
832
833 @command(b'perfphases',
833 @command(b'perfphases',
834 [(b'', b'full', False, b'include file reading time too'),
834 [(b'', b'full', False, b'include file reading time too'),
835 ], b"")
835 ], b"")
836 def perfphases(ui, repo, **opts):
836 def perfphases(ui, repo, **opts):
837 """benchmark phasesets computation"""
837 """benchmark phasesets computation"""
838 opts = _byteskwargs(opts)
838 opts = _byteskwargs(opts)
839 timer, fm = gettimer(ui, opts)
839 timer, fm = gettimer(ui, opts)
840 _phases = repo._phasecache
840 _phases = repo._phasecache
841 full = opts.get(b'full')
841 full = opts.get(b'full')
842 def d():
842 def d():
843 phases = _phases
843 phases = _phases
844 if full:
844 if full:
845 clearfilecache(repo, b'_phasecache')
845 clearfilecache(repo, b'_phasecache')
846 phases = repo._phasecache
846 phases = repo._phasecache
847 phases.invalidate()
847 phases.invalidate()
848 phases.loadphaserevs(repo)
848 phases.loadphaserevs(repo)
849 timer(d)
849 timer(d)
850 fm.end()
850 fm.end()
851
851
852 @command(b'perfphasesremote',
852 @command(b'perfphasesremote',
853 [], b"[DEST]")
853 [], b"[DEST]")
854 def perfphasesremote(ui, repo, dest=None, **opts):
854 def perfphasesremote(ui, repo, dest=None, **opts):
855 """benchmark time needed to analyse phases of the remote server"""
855 """benchmark time needed to analyse phases of the remote server"""
856 from mercurial.node import (
856 from mercurial.node import (
857 bin,
857 bin,
858 )
858 )
859 from mercurial import (
859 from mercurial import (
860 exchange,
860 exchange,
861 hg,
861 hg,
862 phases,
862 phases,
863 )
863 )
864 opts = _byteskwargs(opts)
864 opts = _byteskwargs(opts)
865 timer, fm = gettimer(ui, opts)
865 timer, fm = gettimer(ui, opts)
866
866
867 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
867 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
868 if not path:
868 if not path:
869 raise error.Abort((b'default repository not configured!'),
869 raise error.Abort((b'default repository not configured!'),
870 hint=(b"see 'hg help config.paths'"))
870 hint=(b"see 'hg help config.paths'"))
871 dest = path.pushloc or path.loc
871 dest = path.pushloc or path.loc
872 branches = (path.branch, opts.get(b'branch') or [])
872 branches = (path.branch, opts.get(b'branch') or [])
873 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
873 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
874 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
874 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
875 other = hg.peer(repo, opts, dest)
875 other = hg.peer(repo, opts, dest)
876
876
877 # easier to perform discovery through the operation
877 # easier to perform discovery through the operation
878 op = exchange.pushoperation(repo, other)
878 op = exchange.pushoperation(repo, other)
879 exchange._pushdiscoverychangeset(op)
879 exchange._pushdiscoverychangeset(op)
880
880
881 remotesubset = op.fallbackheads
881 remotesubset = op.fallbackheads
882
882
883 with other.commandexecutor() as e:
883 with other.commandexecutor() as e:
884 remotephases = e.callcommand(b'listkeys',
884 remotephases = e.callcommand(b'listkeys',
885 {b'namespace': b'phases'}).result()
885 {b'namespace': b'phases'}).result()
886 del other
886 del other
887 publishing = remotephases.get(b'publishing', False)
887 publishing = remotephases.get(b'publishing', False)
888 if publishing:
888 if publishing:
889 ui.status((b'publishing: yes\n'))
889 ui.status((b'publishing: yes\n'))
890 else:
890 else:
891 ui.status((b'publishing: no\n'))
891 ui.status((b'publishing: no\n'))
892
892
893 nodemap = repo.changelog.nodemap
893 nodemap = repo.changelog.nodemap
894 nonpublishroots = 0
894 nonpublishroots = 0
895 for nhex, phase in remotephases.iteritems():
895 for nhex, phase in remotephases.iteritems():
896 if nhex == b'publishing': # ignore data related to publish option
896 if nhex == b'publishing': # ignore data related to publish option
897 continue
897 continue
898 node = bin(nhex)
898 node = bin(nhex)
899 if node in nodemap and int(phase):
899 if node in nodemap and int(phase):
900 nonpublishroots += 1
900 nonpublishroots += 1
901 ui.status((b'number of roots: %d\n') % len(remotephases))
901 ui.status((b'number of roots: %d\n') % len(remotephases))
902 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
902 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
903 def d():
903 def d():
904 phases.remotephasessummary(repo,
904 phases.remotephasessummary(repo,
905 remotesubset,
905 remotesubset,
906 remotephases)
906 remotephases)
907 timer(d)
907 timer(d)
908 fm.end()
908 fm.end()
909
909
910 @command(b'perfmanifest',[
910 @command(b'perfmanifest',[
911 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
911 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
912 (b'', b'clear-disk', False, b'clear on-disk caches too'),
912 (b'', b'clear-disk', False, b'clear on-disk caches too'),
913 ] + formatteropts, b'REV|NODE')
913 ] + formatteropts, b'REV|NODE')
914 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
914 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
915 """benchmark the time to read a manifest from disk and return a usable
915 """benchmark the time to read a manifest from disk and return a usable
916 dict-like object
916 dict-like object
917
917
918 Manifest caches are cleared before retrieval."""
918 Manifest caches are cleared before retrieval."""
919 opts = _byteskwargs(opts)
919 opts = _byteskwargs(opts)
920 timer, fm = gettimer(ui, opts)
920 timer, fm = gettimer(ui, opts)
921 if not manifest_rev:
921 if not manifest_rev:
922 ctx = scmutil.revsingle(repo, rev, rev)
922 ctx = scmutil.revsingle(repo, rev, rev)
923 t = ctx.manifestnode()
923 t = ctx.manifestnode()
924 else:
924 else:
925 from mercurial.node import bin
925 from mercurial.node import bin
926
926
927 if len(rev) == 40:
927 if len(rev) == 40:
928 t = bin(rev)
928 t = bin(rev)
929 else:
929 else:
930 try:
930 try:
931 rev = int(rev)
931 rev = int(rev)
932
932
933 if util.safehasattr(repo.manifestlog, b'getstorage'):
933 if util.safehasattr(repo.manifestlog, b'getstorage'):
934 t = repo.manifestlog.getstorage(b'').node(rev)
934 t = repo.manifestlog.getstorage(b'').node(rev)
935 else:
935 else:
936 t = repo.manifestlog._revlog.lookup(rev)
936 t = repo.manifestlog._revlog.lookup(rev)
937 except ValueError:
937 except ValueError:
938 raise error.Abort(b'manifest revision must be integer or full '
938 raise error.Abort(b'manifest revision must be integer or full '
939 b'node')
939 b'node')
940 def d():
940 def d():
941 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
941 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
942 repo.manifestlog[t].read()
942 repo.manifestlog[t].read()
943 timer(d)
943 timer(d)
944 fm.end()
944 fm.end()
945
945
946 @command(b'perfchangeset', formatteropts)
946 @command(b'perfchangeset', formatteropts)
947 def perfchangeset(ui, repo, rev, **opts):
947 def perfchangeset(ui, repo, rev, **opts):
948 opts = _byteskwargs(opts)
948 opts = _byteskwargs(opts)
949 timer, fm = gettimer(ui, opts)
949 timer, fm = gettimer(ui, opts)
950 n = scmutil.revsingle(repo, rev).node()
950 n = scmutil.revsingle(repo, rev).node()
951 def d():
951 def d():
952 repo.changelog.read(n)
952 repo.changelog.read(n)
953 #repo.changelog._cache = None
953 #repo.changelog._cache = None
954 timer(d)
954 timer(d)
955 fm.end()
955 fm.end()
956
956
957 @command(b'perfindex', formatteropts)
957 @command(b'perfindex', formatteropts)
958 def perfindex(ui, repo, **opts):
958 def perfindex(ui, repo, **opts):
959 import mercurial.revlog
959 import mercurial.revlog
960 opts = _byteskwargs(opts)
960 opts = _byteskwargs(opts)
961 timer, fm = gettimer(ui, opts)
961 timer, fm = gettimer(ui, opts)
962 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
962 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
963 n = repo[b"tip"].node()
963 n = repo[b"tip"].node()
964 svfs = getsvfs(repo)
964 svfs = getsvfs(repo)
965 def d():
965 def d():
966 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
966 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
967 cl.rev(n)
967 cl.rev(n)
968 timer(d)
968 timer(d)
969 fm.end()
969 fm.end()
970
970
971 @command(b'perfstartup', formatteropts)
971 @command(b'perfstartup', formatteropts)
972 def perfstartup(ui, repo, **opts):
972 def perfstartup(ui, repo, **opts):
973 opts = _byteskwargs(opts)
973 opts = _byteskwargs(opts)
974 timer, fm = gettimer(ui, opts)
974 timer, fm = gettimer(ui, opts)
975 def d():
975 def d():
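# spawn a fresh `hg version -q` with HGRCPATH cleared so user configuration
# does not skew the measured startup time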
976 if os.name != r'nt':
976 if os.name != r'nt':
977 os.system(b"HGRCPATH= %s version -q > /dev/null" %
977 os.system(b"HGRCPATH= %s version -q > /dev/null" %
978 fsencode(sys.argv[0]))
978 fsencode(sys.argv[0]))
979 else:
979 else:
980 os.environ[r'HGRCPATH'] = r' '
980 os.environ[r'HGRCPATH'] = r' '
981 os.system(r"%s version -q > NUL" % sys.argv[0])
981 os.system(r"%s version -q > NUL" % sys.argv[0])
982 timer(d)
982 timer(d)
983 fm.end()
983 fm.end()
984
984
985 @command(b'perfparents', formatteropts)
985 @command(b'perfparents', formatteropts)
986 def perfparents(ui, repo, **opts):
986 def perfparents(ui, repo, **opts):
987 opts = _byteskwargs(opts)
987 opts = _byteskwargs(opts)
988 timer, fm = gettimer(ui, opts)
988 timer, fm = gettimer(ui, opts)
989 # control the number of commits perfparents iterates over
989 # control the number of commits perfparents iterates over
990 # experimental config: perf.parentscount
990 # experimental config: perf.parentscount
991 count = getint(ui, b"perf", b"parentscount", 1000)
991 count = getint(ui, b"perf", b"parentscount", 1000)
992 if len(repo.changelog) < count:
992 if len(repo.changelog) < count:
993 raise error.Abort(b"repo needs %d commits for this test" % count)
993 raise error.Abort(b"repo needs %d commits for this test" % count)
994 repo = repo.unfiltered()
994 repo = repo.unfiltered()
995 nl = [repo.changelog.node(i) for i in _xrange(count)]
995 nl = [repo.changelog.node(i) for i in _xrange(count)]
996 def d():
996 def d():
997 for n in nl:
997 for n in nl:
998 repo.changelog.parents(n)
998 repo.changelog.parents(n)
999 timer(d)
999 timer(d)
1000 fm.end()
1000 fm.end()
1001
1001
1002 @command(b'perfctxfiles', formatteropts)
1002 @command(b'perfctxfiles', formatteropts)
1003 def perfctxfiles(ui, repo, x, **opts):
1003 def perfctxfiles(ui, repo, x, **opts):
1004 opts = _byteskwargs(opts)
1004 opts = _byteskwargs(opts)
1005 x = int(x)
1005 x = int(x)
1006 timer, fm = gettimer(ui, opts)
1006 timer, fm = gettimer(ui, opts)
1007 def d():
1007 def d():
1008 len(repo[x].files())
1008 len(repo[x].files())
1009 timer(d)
1009 timer(d)
1010 fm.end()
1010 fm.end()
1011
1011
1012 @command(b'perfrawfiles', formatteropts)
1012 @command(b'perfrawfiles', formatteropts)
1013 def perfrawfiles(ui, repo, x, **opts):
1013 def perfrawfiles(ui, repo, x, **opts):
1014 opts = _byteskwargs(opts)
1014 opts = _byteskwargs(opts)
1015 x = int(x)
1015 x = int(x)
1016 timer, fm = gettimer(ui, opts)
1016 timer, fm = gettimer(ui, opts)
1017 cl = repo.changelog
1017 cl = repo.changelog
1018 def d():
1018 def d():
1019 len(cl.read(x)[3])
1019 len(cl.read(x)[3])
1020 timer(d)
1020 timer(d)
1021 fm.end()
1021 fm.end()
1022
1022
1023 @command(b'perflookup', formatteropts)
1023 @command(b'perflookup', formatteropts)
1024 def perflookup(ui, repo, rev, **opts):
1024 def perflookup(ui, repo, rev, **opts):
1025 opts = _byteskwargs(opts)
1025 opts = _byteskwargs(opts)
1026 timer, fm = gettimer(ui, opts)
1026 timer, fm = gettimer(ui, opts)
1027 timer(lambda: len(repo.lookup(rev)))
1027 timer(lambda: len(repo.lookup(rev)))
1028 fm.end()
1028 fm.end()
1029
1029
1030 @command(b'perflinelogedits',
1030 @command(b'perflinelogedits',
1031 [(b'n', b'edits', 10000, b'number of edits'),
1031 [(b'n', b'edits', 10000, b'number of edits'),
1032 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1032 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1033 ], norepo=True)
1033 ], norepo=True)
1034 def perflinelogedits(ui, **opts):
1034 def perflinelogedits(ui, **opts):
1035 from mercurial import linelog
1035 from mercurial import linelog
1036
1036
1037 opts = _byteskwargs(opts)
1037 opts = _byteskwargs(opts)
1038
1038
1039 edits = opts[b'edits']
1039 edits = opts[b'edits']
1040 maxhunklines = opts[b'max_hunk_lines']
1040 maxhunklines = opts[b'max_hunk_lines']
1041
1041
1042 maxb1 = 100000
1042 maxb1 = 100000
1043 random.seed(0)
1043 random.seed(0)
1044 randint = random.randint
1044 randint = random.randint
1045 currentlines = 0
1045 currentlines = 0
1046 arglist = []
1046 arglist = []
1047 for rev in _xrange(edits):
1047 for rev in _xrange(edits):
1048 a1 = randint(0, currentlines)
1048 a1 = randint(0, currentlines)
1049 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1049 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1050 b1 = randint(0, maxb1)
1050 b1 = randint(0, maxb1)
1051 b2 = randint(b1, b1 + maxhunklines)
1051 b2 = randint(b1, b1 + maxhunklines)
1052 currentlines += (b2 - b1) - (a2 - a1)
1052 currentlines += (b2 - b1) - (a2 - a1)
1053 arglist.append((rev, a1, a2, b1, b2))
1053 arglist.append((rev, a1, a2, b1, b2))
1054
1054
1055 def d():
1055 def d():
1056 ll = linelog.linelog()
1056 ll = linelog.linelog()
1057 for args in arglist:
1057 for args in arglist:
1058 ll.replacelines(*args)
1058 ll.replacelines(*args)
1059
1059
1060 timer, fm = gettimer(ui, opts)
1060 timer, fm = gettimer(ui, opts)
1061 timer(d)
1061 timer(d)
1062 fm.end()
1062 fm.end()
1063
1063
1064 @command(b'perfrevrange', formatteropts)
1064 @command(b'perfrevrange', formatteropts)
1065 def perfrevrange(ui, repo, *specs, **opts):
1065 def perfrevrange(ui, repo, *specs, **opts):
1066 opts = _byteskwargs(opts)
1066 opts = _byteskwargs(opts)
1067 timer, fm = gettimer(ui, opts)
1067 timer, fm = gettimer(ui, opts)
1068 revrange = scmutil.revrange
1068 revrange = scmutil.revrange
1069 timer(lambda: len(revrange(repo, specs)))
1069 timer(lambda: len(revrange(repo, specs)))
1070 fm.end()
1070 fm.end()
1071
1071
1072 @command(b'perfnodelookup', formatteropts)
1072 @command(b'perfnodelookup', formatteropts)
1073 def perfnodelookup(ui, repo, rev, **opts):
1073 def perfnodelookup(ui, repo, rev, **opts):
1074 opts = _byteskwargs(opts)
1074 opts = _byteskwargs(opts)
1075 timer, fm = gettimer(ui, opts)
1075 timer, fm = gettimer(ui, opts)
1076 import mercurial.revlog
1076 import mercurial.revlog
1077 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1077 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1078 n = scmutil.revsingle(repo, rev).node()
1078 n = scmutil.revsingle(repo, rev).node()
1079 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1079 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1080 def d():
1080 def d():
1081 cl.rev(n)
1081 cl.rev(n)
1082 clearcaches(cl)
1082 clearcaches(cl)
1083 timer(d)
1083 timer(d)
1084 fm.end()
1084 fm.end()
1085
1085
1086 @command(b'perflog',
1086 @command(b'perflog',
1087 [(b'', b'rename', False, b'ask log to follow renames')
1087 [(b'', b'rename', False, b'ask log to follow renames')
1088 ] + formatteropts)
1088 ] + formatteropts)
1089 def perflog(ui, repo, rev=None, **opts):
1089 def perflog(ui, repo, rev=None, **opts):
1090 opts = _byteskwargs(opts)
1090 opts = _byteskwargs(opts)
1091 if rev is None:
1091 if rev is None:
1092 rev = []
1092 rev = []
1093 timer, fm = gettimer(ui, opts)
1093 timer, fm = gettimer(ui, opts)
1094 ui.pushbuffer()
1094 ui.pushbuffer()
1095 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1095 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1096 copies=opts.get(b'rename')))
1096 copies=opts.get(b'rename')))
1097 ui.popbuffer()
1097 ui.popbuffer()
1098 fm.end()
1098 fm.end()
1099
1099
1100 @command(b'perfmoonwalk', formatteropts)
1100 @command(b'perfmoonwalk', formatteropts)
1101 def perfmoonwalk(ui, repo, **opts):
1101 def perfmoonwalk(ui, repo, **opts):
1102 """benchmark walking the changelog backwards
1102 """benchmark walking the changelog backwards
1103
1103
1104 This also loads the changelog data for each revision in the changelog.
1104 This also loads the changelog data for each revision in the changelog.
1105 """
1105 """
1106 opts = _byteskwargs(opts)
1106 opts = _byteskwargs(opts)
1107 timer, fm = gettimer(ui, opts)
1107 timer, fm = gettimer(ui, opts)
1108 def moonwalk():
1108 def moonwalk():
1109 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1109 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1110 ctx = repo[i]
1110 ctx = repo[i]
1111 ctx.branch() # read changelog data (in addition to the index)
1111 ctx.branch() # read changelog data (in addition to the index)
1112 timer(moonwalk)
1112 timer(moonwalk)
1113 fm.end()
1113 fm.end()
1114
1114
1115 @command(b'perftemplating',
1115 @command(b'perftemplating',
1116 [(b'r', b'rev', [], b'revisions to run the template on'),
1116 [(b'r', b'rev', [], b'revisions to run the template on'),
1117 ] + formatteropts)
1117 ] + formatteropts)
1118 def perftemplating(ui, repo, testedtemplate=None, **opts):
1118 def perftemplating(ui, repo, testedtemplate=None, **opts):
1119 """test the rendering time of a given template"""
1119 """test the rendering time of a given template"""
1120 if makelogtemplater is None:
1120 if makelogtemplater is None:
1121 raise error.Abort((b"perftemplating not available with this Mercurial"),
1121 raise error.Abort((b"perftemplating not available with this Mercurial"),
1122 hint=b"use 4.3 or later")
1122 hint=b"use 4.3 or later")
1123
1123
1124 opts = _byteskwargs(opts)
1124 opts = _byteskwargs(opts)
1125
1125
1126 nullui = ui.copy()
1126 nullui = ui.copy()
1127 nullui.fout = open(os.devnull, r'wb')
1127 nullui.fout = open(os.devnull, r'wb')
1128 nullui.disablepager()
1128 nullui.disablepager()
1129 revs = opts.get(b'rev')
1129 revs = opts.get(b'rev')
1130 if not revs:
1130 if not revs:
1131 revs = [b'all()']
1131 revs = [b'all()']
1132 revs = list(scmutil.revrange(repo, revs))
1132 revs = list(scmutil.revrange(repo, revs))
1133
1133
1134 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1134 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1135 b' {author|person}: {desc|firstline}\n')
1135 b' {author|person}: {desc|firstline}\n')
1136 if testedtemplate is None:
1136 if testedtemplate is None:
1137 testedtemplate = defaulttemplate
1137 testedtemplate = defaulttemplate
1138 displayer = makelogtemplater(nullui, repo, testedtemplate)
1138 displayer = makelogtemplater(nullui, repo, testedtemplate)
1139 def format():
1139 def format():
1140 for r in revs:
1140 for r in revs:
1141 ctx = repo[r]
1141 ctx = repo[r]
1142 displayer.show(ctx)
1142 displayer.show(ctx)
1143 displayer.flush(ctx)
1143 displayer.flush(ctx)
1144
1144
1145 timer, fm = gettimer(ui, opts)
1145 timer, fm = gettimer(ui, opts)
1146 timer(format)
1146 timer(format)
1147 fm.end()
1147 fm.end()
1148
1148
1149 @command(b'perfcca', formatteropts)
1149 @command(b'perfcca', formatteropts)
1150 def perfcca(ui, repo, **opts):
1150 def perfcca(ui, repo, **opts):
1151 opts = _byteskwargs(opts)
1151 opts = _byteskwargs(opts)
1152 timer, fm = gettimer(ui, opts)
1152 timer, fm = gettimer(ui, opts)
1153 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1153 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1154 fm.end()
1154 fm.end()
1155
1155
1156 @command(b'perffncacheload', formatteropts)
1156 @command(b'perffncacheload', formatteropts)
1157 def perffncacheload(ui, repo, **opts):
1157 def perffncacheload(ui, repo, **opts):
1158 opts = _byteskwargs(opts)
1158 opts = _byteskwargs(opts)
1159 timer, fm = gettimer(ui, opts)
1159 timer, fm = gettimer(ui, opts)
1160 s = repo.store
1160 s = repo.store
1161 def d():
1161 def d():
1162 s.fncache._load()
1162 s.fncache._load()
1163 timer(d)
1163 timer(d)
1164 fm.end()
1164 fm.end()
1165
1165
1166 @command(b'perffncachewrite', formatteropts)
1166 @command(b'perffncachewrite', formatteropts)
1167 def perffncachewrite(ui, repo, **opts):
1167 def perffncachewrite(ui, repo, **opts):
1168 opts = _byteskwargs(opts)
1168 opts = _byteskwargs(opts)
1169 timer, fm = gettimer(ui, opts)
1169 timer, fm = gettimer(ui, opts)
1170 s = repo.store
1170 s = repo.store
1171 lock = repo.lock()
1171 lock = repo.lock()
1172 s.fncache._load()
1172 s.fncache._load()
1173 tr = repo.transaction(b'perffncachewrite')
1173 tr = repo.transaction(b'perffncachewrite')
1174 tr.addbackup(b'fncache')
1174 tr.addbackup(b'fncache')
1175 def d():
1175 def d():
1176 s.fncache._dirty = True
1176 s.fncache._dirty = True
1177 s.fncache.write(tr)
1177 s.fncache.write(tr)
1178 timer(d)
1178 timer(d)
1179 tr.close()
1179 tr.close()
1180 lock.release()
1180 lock.release()
1181 fm.end()
1181 fm.end()
1182
1182
1183 @command(b'perffncacheencode', formatteropts)
1183 @command(b'perffncacheencode', formatteropts)
1184 def perffncacheencode(ui, repo, **opts):
1184 def perffncacheencode(ui, repo, **opts):
1185 opts = _byteskwargs(opts)
1185 opts = _byteskwargs(opts)
1186 timer, fm = gettimer(ui, opts)
1186 timer, fm = gettimer(ui, opts)
1187 s = repo.store
1187 s = repo.store
1188 s.fncache._load()
1188 s.fncache._load()
1189 def d():
1189 def d():
1190 for p in s.fncache.entries:
1190 for p in s.fncache.entries:
1191 s.encode(p)
1191 s.encode(p)
1192 timer(d)
1192 timer(d)
1193 fm.end()
1193 fm.end()
1194
1194
1195 def _bdiffworker(q, blocks, xdiff, ready, done):
1195 def _bdiffworker(q, blocks, xdiff, ready, done):
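# Worker loop: diff (text1, text2) pairs until a None sentinel ends the
# current batch, then park on the `ready` condition until the main thread
# queues another run or sets `done` to shut the thread down.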
1196 while not done.is_set():
1196 while not done.is_set():
1197 pair = q.get()
1197 pair = q.get()
1198 while pair is not None:
1198 while pair is not None:
1199 if xdiff:
1199 if xdiff:
1200 mdiff.bdiff.xdiffblocks(*pair)
1200 mdiff.bdiff.xdiffblocks(*pair)
1201 elif blocks:
1201 elif blocks:
1202 mdiff.bdiff.blocks(*pair)
1202 mdiff.bdiff.blocks(*pair)
1203 else:
1203 else:
1204 mdiff.textdiff(*pair)
1204 mdiff.textdiff(*pair)
1205 q.task_done()
1205 q.task_done()
1206 pair = q.get()
1206 pair = q.get()
1207 q.task_done() # for the None one
1207 q.task_done() # for the None one
1208 with ready:
1208 with ready:
1209 ready.wait()
1209 ready.wait()
1210
1210
1211 def _manifestrevision(repo, mnode):
1211 def _manifestrevision(repo, mnode):
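# Return the raw manifest text for `mnode`, supporting both the newer
# manifestlog.getstorage() API and the older ._revlog attribute.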
1212 ml = repo.manifestlog
1212 ml = repo.manifestlog
1213
1213
1214 if util.safehasattr(ml, b'getstorage'):
1214 if util.safehasattr(ml, b'getstorage'):
1215 store = ml.getstorage(b'')
1215 store = ml.getstorage(b'')
1216 else:
1216 else:
1217 store = ml._revlog
1217 store = ml._revlog
1218
1218
1219 return store.revision(mnode)
1219 return store.revision(mnode)
1220
1220
1221 @command(b'perfbdiff', revlogopts + formatteropts + [
1221 @command(b'perfbdiff', revlogopts + formatteropts + [
1222 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1222 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1223 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1223 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1224 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1224 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),

1225 (b'', b'blocks', False, b'test computing diffs into blocks'),
1225 (b'', b'blocks', False, b'test computing diffs into blocks'),
1226 (b'', b'xdiff', False, b'use xdiff algorithm'),
1226 (b'', b'xdiff', False, b'use xdiff algorithm'),
1227 ],
1227 ],
1228
1228
1229 b'-c|-m|FILE REV')
1229 b'-c|-m|FILE REV')
1230 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1230 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1231 """benchmark a bdiff between revisions
1231 """benchmark a bdiff between revisions
1232
1232
1233 By default, benchmark a bdiff between the requested revision and its delta parent.
1233 By default, benchmark a bdiff between the requested revision and its delta parent.
1234
1234
1235 With ``--count``, benchmark bdiffs between delta parents and self for N
1235 With ``--count``, benchmark bdiffs between delta parents and self for N
1236 revisions starting at the specified revision.
1236 revisions starting at the specified revision.
1237
1237
1238 With ``--alldata``, assume the requested revision is a changeset and
1238 With ``--alldata``, assume the requested revision is a changeset and
1239 measure bdiffs for all changes related to that changeset (manifest
1239 measure bdiffs for all changes related to that changeset (manifest
1240 and filelogs).
1240 and filelogs).
1241 """
1241 """
1242 opts = _byteskwargs(opts)
1242 opts = _byteskwargs(opts)
1243
1243
1244 if opts[b'xdiff'] and not opts[b'blocks']:
1244 if opts[b'xdiff'] and not opts[b'blocks']:
1245 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1245 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1246
1246
1247 if opts[b'alldata']:
1247 if opts[b'alldata']:
1248 opts[b'changelog'] = True
1248 opts[b'changelog'] = True
1249
1249
1250 if opts.get(b'changelog') or opts.get(b'manifest'):
1250 if opts.get(b'changelog') or opts.get(b'manifest'):
1251 file_, rev = None, file_
1251 file_, rev = None, file_
1252 elif rev is None:
1252 elif rev is None:
1253 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1253 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1254
1254
1255 blocks = opts[b'blocks']
1255 blocks = opts[b'blocks']
1256 xdiff = opts[b'xdiff']
1256 xdiff = opts[b'xdiff']
1257 textpairs = []
1257 textpairs = []
1258
1258
1259 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1259 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1260
1260
1261 startrev = r.rev(r.lookup(rev))
1261 startrev = r.rev(r.lookup(rev))
1262 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1262 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1263 if opts[b'alldata']:
1263 if opts[b'alldata']:
1264 # Load revisions associated with changeset.
1264 # Load revisions associated with changeset.
1265 ctx = repo[rev]
1265 ctx = repo[rev]
1266 mtext = _manifestrevision(repo, ctx.manifestnode())
1266 mtext = _manifestrevision(repo, ctx.manifestnode())
1267 for pctx in ctx.parents():
1267 for pctx in ctx.parents():
1268 pman = _manifestrevision(repo, pctx.manifestnode())
1268 pman = _manifestrevision(repo, pctx.manifestnode())
1269 textpairs.append((pman, mtext))
1269 textpairs.append((pman, mtext))
1270
1270
1271 # Load filelog revisions by iterating manifest delta.
1271 # Load filelog revisions by iterating manifest delta.
1272 man = ctx.manifest()
1272 man = ctx.manifest()
1273 pman = ctx.p1().manifest()
1273 pman = ctx.p1().manifest()
1274 for filename, change in pman.diff(man).items():
1274 for filename, change in pman.diff(man).items():
1275 fctx = repo.file(filename)
1275 fctx = repo.file(filename)
1276 f1 = fctx.revision(change[0][0] or -1)
1276 f1 = fctx.revision(change[0][0] or -1)
1277 f2 = fctx.revision(change[1][0] or -1)
1277 f2 = fctx.revision(change[1][0] or -1)
1278 textpairs.append((f1, f2))
1278 textpairs.append((f1, f2))
1279 else:
1279 else:
1280 dp = r.deltaparent(rev)
1280 dp = r.deltaparent(rev)
1281 textpairs.append((r.revision(dp), r.revision(rev)))
1281 textpairs.append((r.revision(dp), r.revision(rev)))
1282
1282
1283 withthreads = threads > 0
1283 withthreads = threads > 0
1284 if not withthreads:
1284 if not withthreads:
1285 def d():
1285 def d():
1286 for pair in textpairs:
1286 for pair in textpairs:
1287 if xdiff:
1287 if xdiff:
1288 mdiff.bdiff.xdiffblocks(*pair)
1288 mdiff.bdiff.xdiffblocks(*pair)
1289 elif blocks:
1289 elif blocks:
1290 mdiff.bdiff.blocks(*pair)
1290 mdiff.bdiff.blocks(*pair)
1291 else:
1291 else:
1292 mdiff.textdiff(*pair)
1292 mdiff.textdiff(*pair)
1293 else:
1293 else:
1294 q = queue()
1294 q = queue()
1295 for i in _xrange(threads):
1295 for i in _xrange(threads):
1296 q.put(None)
1296 q.put(None)
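# Prime the queue with one sentinel per worker so every thread starts up,
# drains it, and is parked on `ready` before the first timed run begins.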
1297 ready = threading.Condition()
1297 ready = threading.Condition()
1298 done = threading.Event()
1298 done = threading.Event()
1299 for i in _xrange(threads):
1299 for i in _xrange(threads):
1300 threading.Thread(target=_bdiffworker,
1300 threading.Thread(target=_bdiffworker,
1301 args=(q, blocks, xdiff, ready, done)).start()
1301 args=(q, blocks, xdiff, ready, done)).start()
1302 q.join()
1302 q.join()
1303 def d():
1303 def d():
1304 for pair in textpairs:
1304 for pair in textpairs:
1305 q.put(pair)
1305 q.put(pair)
1306 for i in _xrange(threads):
1306 for i in _xrange(threads):
1307 q.put(None)
1307 q.put(None)
1308 with ready:
1308 with ready:
1309 ready.notify_all()
1309 ready.notify_all()
1310 q.join()
1310 q.join()
1311 timer, fm = gettimer(ui, opts)
1311 timer, fm = gettimer(ui, opts)
1312 timer(d)
1312 timer(d)
1313 fm.end()
1313 fm.end()
1314
1314
1315 if withthreads:
1315 if withthreads:
1316 done.set()
1316 done.set()
1317 for i in _xrange(threads):
1317 for i in _xrange(threads):
1318 q.put(None)
1318 q.put(None)
1319 with ready:
1319 with ready:
1320 ready.notify_all()
1320 ready.notify_all()
1321
1321
1322 @command(b'perfunidiff', revlogopts + formatteropts + [
1322 @command(b'perfunidiff', revlogopts + formatteropts + [
1323 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1323 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1324 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1324 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1325 ], b'-c|-m|FILE REV')
1325 ], b'-c|-m|FILE REV')
1326 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1326 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1327 """benchmark a unified diff between revisions
1327 """benchmark a unified diff between revisions
1328
1328
1329 This doesn't include any copy tracing - it's just a unified diff
1329 This doesn't include any copy tracing - it's just a unified diff
1330 of the texts.
1330 of the texts.
1331
1331
1332 By default, benchmark a diff between the requested revision and its delta parent.
1332 By default, benchmark a diff between the requested revision and its delta parent.
1333
1333
1334 With ``--count``, benchmark diffs between delta parents and self for N
1334 With ``--count``, benchmark diffs between delta parents and self for N
1335 revisions starting at the specified revision.
1335 revisions starting at the specified revision.
1336
1336
1337 With ``--alldata``, assume the requested revision is a changeset and
1337 With ``--alldata``, assume the requested revision is a changeset and
1338 measure diffs for all changes related to that changeset (manifest
1338 measure diffs for all changes related to that changeset (manifest
1339 and filelogs).
1339 and filelogs).
1340 """
1340 """
1341 opts = _byteskwargs(opts)
1341 opts = _byteskwargs(opts)
1342 if opts[b'alldata']:
1342 if opts[b'alldata']:
1343 opts[b'changelog'] = True
1343 opts[b'changelog'] = True
1344
1344
1345 if opts.get(b'changelog') or opts.get(b'manifest'):
1345 if opts.get(b'changelog') or opts.get(b'manifest'):
1346 file_, rev = None, file_
1346 file_, rev = None, file_
1347 elif rev is None:
1347 elif rev is None:
1348 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1348 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1349
1349
1350 textpairs = []
1350 textpairs = []
1351
1351
1352 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1352 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1353
1353
1354 startrev = r.rev(r.lookup(rev))
1354 startrev = r.rev(r.lookup(rev))
1355 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1355 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1356 if opts[b'alldata']:
1356 if opts[b'alldata']:
1357 # Load revisions associated with changeset.
1357 # Load revisions associated with changeset.
1358 ctx = repo[rev]
1358 ctx = repo[rev]
1359 mtext = _manifestrevision(repo, ctx.manifestnode())
1359 mtext = _manifestrevision(repo, ctx.manifestnode())
1360 for pctx in ctx.parents():
1360 for pctx in ctx.parents():
1361 pman = _manifestrevision(repo, pctx.manifestnode())
1361 pman = _manifestrevision(repo, pctx.manifestnode())
1362 textpairs.append((pman, mtext))
1362 textpairs.append((pman, mtext))
1363
1363
1364 # Load filelog revisions by iterating manifest delta.
1364 # Load filelog revisions by iterating manifest delta.
1365 man = ctx.manifest()
1365 man = ctx.manifest()
1366 pman = ctx.p1().manifest()
1366 pman = ctx.p1().manifest()
1367 for filename, change in pman.diff(man).items():
1367 for filename, change in pman.diff(man).items():
1368 fctx = repo.file(filename)
1368 fctx = repo.file(filename)
1369 f1 = fctx.revision(change[0][0] or -1)
1369 f1 = fctx.revision(change[0][0] or -1)
1370 f2 = fctx.revision(change[1][0] or -1)
1370 f2 = fctx.revision(change[1][0] or -1)
1371 textpairs.append((f1, f2))
1371 textpairs.append((f1, f2))
1372 else:
1372 else:
1373 dp = r.deltaparent(rev)
1373 dp = r.deltaparent(rev)
1374 textpairs.append((r.revision(dp), r.revision(rev)))
1374 textpairs.append((r.revision(dp), r.revision(rev)))
1375
1375
1376 def d():
1376 def d():
1377 for left, right in textpairs:
1377 for left, right in textpairs:
1378 # The date strings don't matter, so we pass empty strings.
1378 # The date strings don't matter, so we pass empty strings.
1379 headerlines, hunks = mdiff.unidiff(
1379 headerlines, hunks = mdiff.unidiff(
1380 left, b'', right, b'', b'left', b'right', binary=False)
1380 left, b'', right, b'', b'left', b'right', binary=False)
1381 # consume iterators in roughly the way patch.py does
1381 # consume iterators in roughly the way patch.py does
1382 b'\n'.join(headerlines)
1382 b'\n'.join(headerlines)
1383 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1383 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1384 timer, fm = gettimer(ui, opts)
1384 timer, fm = gettimer(ui, opts)
1385 timer(d)
1385 timer(d)
1386 fm.end()
1386 fm.end()
1387
1387
1388 @command(b'perfdiffwd', formatteropts)
1388 @command(b'perfdiffwd', formatteropts)
1389 def perfdiffwd(ui, repo, **opts):
1389 def perfdiffwd(ui, repo, **opts):
1390 """Profile diff of working directory changes"""
1390 """Profile diff of working directory changes"""
1391 opts = _byteskwargs(opts)
1391 opts = _byteskwargs(opts)
1392 timer, fm = gettimer(ui, opts)
1392 timer, fm = gettimer(ui, opts)
1393 options = {
1393 options = {
1394 'w': 'ignore_all_space',
1394 'w': 'ignore_all_space',
1395 'b': 'ignore_space_change',
1395 'b': 'ignore_space_change',
1396 'B': 'ignore_blank_lines',
1396 'B': 'ignore_blank_lines',
1397 }
1397 }
1398
1398
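# benchmark a plain diff first, then each whitespace-handling combination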
1399 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1399 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1400 opts = dict((options[c], b'1') for c in diffopt)
1400 opts = dict((options[c], b'1') for c in diffopt)
1401 def d():
1401 def d():
1402 ui.pushbuffer()
1402 ui.pushbuffer()
1403 commands.diff(ui, repo, **opts)
1403 commands.diff(ui, repo, **opts)
1404 ui.popbuffer()
1404 ui.popbuffer()
1405 diffopt = diffopt.encode('ascii')
1405 diffopt = diffopt.encode('ascii')
1406 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1406 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1407 timer(d, title)
1407 timer(d, title=title)
1408 fm.end()
1408 fm.end()
1409
1409
1410 @command(b'perfrevlogindex', revlogopts + formatteropts,
1410 @command(b'perfrevlogindex', revlogopts + formatteropts,
1411 b'-c|-m|FILE')
1411 b'-c|-m|FILE')
1412 def perfrevlogindex(ui, repo, file_=None, **opts):
1412 def perfrevlogindex(ui, repo, file_=None, **opts):
1413 """Benchmark operations against a revlog index.
1413 """Benchmark operations against a revlog index.
1414
1414
1415 This tests constructing a revlog instance, reading index data,
1415 This tests constructing a revlog instance, reading index data,
1416 parsing index data, and performing various operations related to
1416 parsing index data, and performing various operations related to
1417 index data.
1417 index data.
1418 """
1418 """
1419
1419
1420 opts = _byteskwargs(opts)
1420 opts = _byteskwargs(opts)
1421
1421
1422 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1422 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1423
1423
1424 opener = getattr(rl, 'opener') # trick linter
1424 opener = getattr(rl, 'opener') # trick linter
1425 indexfile = rl.indexfile
1425 indexfile = rl.indexfile
1426 data = opener.read(indexfile)
1426 data = opener.read(indexfile)
1427
1427
1428 header = struct.unpack(b'>I', data[0:4])[0]
1428 header = struct.unpack(b'>I', data[0:4])[0]
1429 version = header & 0xFFFF
1429 version = header & 0xFFFF
1430 if version == 1:
1430 if version == 1:
1431 revlogio = revlog.revlogio()
1431 revlogio = revlog.revlogio()
1432 inline = header & (1 << 16)
1432 inline = header & (1 << 16)
1433 else:
1433 else:
1434 raise error.Abort((b'unsupported revlog version: %d') % version)
1434 raise error.Abort((b'unsupported revlog version: %d') % version)
1435
1435
1436 rllen = len(rl)
1436 rllen = len(rl)
1437
1437
1438 node0 = rl.node(0)
1438 node0 = rl.node(0)
1439 node25 = rl.node(rllen // 4)
1439 node25 = rl.node(rllen // 4)
1440 node50 = rl.node(rllen // 2)
1440 node50 = rl.node(rllen // 2)
1441 node75 = rl.node(rllen // 4 * 3)
1441 node75 = rl.node(rllen // 4 * 3)
1442 node100 = rl.node(rllen - 1)
1442 node100 = rl.node(rllen - 1)
1443
1443
1444 allrevs = range(rllen)
1444 allrevs = range(rllen)
1445 allrevsrev = list(reversed(allrevs))
1445 allrevsrev = list(reversed(allrevs))
1446 allnodes = [rl.node(rev) for rev in range(rllen)]
1446 allnodes = [rl.node(rev) for rev in range(rllen)]
1447 allnodesrev = list(reversed(allnodes))
1447 allnodesrev = list(reversed(allnodes))
1448
1448
1449 def constructor():
1449 def constructor():
1450 revlog.revlog(opener, indexfile)
1450 revlog.revlog(opener, indexfile)
1451
1451
1452 def read():
1452 def read():
1453 with opener(indexfile) as fh:
1453 with opener(indexfile) as fh:
1454 fh.read()
1454 fh.read()
1455
1455
1456 def parseindex():
1456 def parseindex():
1457 revlogio.parseindex(data, inline)
1457 revlogio.parseindex(data, inline)
1458
1458
1459 def getentry(revornode):
1459 def getentry(revornode):
1460 index = revlogio.parseindex(data, inline)[0]
1460 index = revlogio.parseindex(data, inline)[0]
1461 index[revornode]
1461 index[revornode]
1462
1462
1463 def getentries(revs, count=1):
1463 def getentries(revs, count=1):
1464 index = revlogio.parseindex(data, inline)[0]
1464 index = revlogio.parseindex(data, inline)[0]
1465
1465
1466 for i in range(count):
1466 for i in range(count):
1467 for rev in revs:
1467 for rev in revs:
1468 index[rev]
1468 index[rev]
1469
1469
1470 def resolvenode(node):
1470 def resolvenode(node):
1471 nodemap = revlogio.parseindex(data, inline)[1]
1471 nodemap = revlogio.parseindex(data, inline)[1]
1472 # This only works for the C code.
1472 # This only works for the C code.
1473 if nodemap is None:
1473 if nodemap is None:
1474 return
1474 return
1475
1475
1476 try:
1476 try:
1477 nodemap[node]
1477 nodemap[node]
1478 except error.RevlogError:
1478 except error.RevlogError:
1479 pass
1479 pass
1480
1480
1481 def resolvenodes(nodes, count=1):
1481 def resolvenodes(nodes, count=1):
1482 nodemap = revlogio.parseindex(data, inline)[1]
1482 nodemap = revlogio.parseindex(data, inline)[1]
1483 if nodemap is None:
1483 if nodemap is None:
1484 return
1484 return
1485
1485
1486 for i in range(count):
1486 for i in range(count):
1487 for node in nodes:
1487 for node in nodes:
1488 try:
1488 try:
1489 nodemap[node]
1489 nodemap[node]
1490 except error.RevlogError:
1490 except error.RevlogError:
1491 pass
1491 pass
1492
1492
1493 benches = [
1493 benches = [
1494 (constructor, b'revlog constructor'),
1494 (constructor, b'revlog constructor'),
1495 (read, b'read'),
1495 (read, b'read'),
1496 (parseindex, b'create index object'),
1496 (parseindex, b'create index object'),
1497 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1497 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1498 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1498 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1499 (lambda: resolvenode(node0), b'look up node at rev 0'),
1499 (lambda: resolvenode(node0), b'look up node at rev 0'),
1500 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1500 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1501 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1501 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1502 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1502 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1503 (lambda: resolvenode(node100), b'look up node at tip'),
1503 (lambda: resolvenode(node100), b'look up node at tip'),
1504 # 2x variation is to measure caching impact.
1504 # 2x variation is to measure caching impact.
1505 (lambda: resolvenodes(allnodes),
1505 (lambda: resolvenodes(allnodes),
1506 b'look up all nodes (forward)'),
1506 b'look up all nodes (forward)'),
1507 (lambda: resolvenodes(allnodes, 2),
1507 (lambda: resolvenodes(allnodes, 2),
1508 b'look up all nodes 2x (forward)'),
1508 b'look up all nodes 2x (forward)'),
1509 (lambda: resolvenodes(allnodesrev),
1509 (lambda: resolvenodes(allnodesrev),
1510 b'look up all nodes (reverse)'),
1510 b'look up all nodes (reverse)'),
1511 (lambda: resolvenodes(allnodesrev, 2),
1511 (lambda: resolvenodes(allnodesrev, 2),
1512 b'look up all nodes 2x (reverse)'),
1512 b'look up all nodes 2x (reverse)'),
1513 (lambda: getentries(allrevs),
1513 (lambda: getentries(allrevs),
1514 b'retrieve all index entries (forward)'),
1514 b'retrieve all index entries (forward)'),
1515 (lambda: getentries(allrevs, 2),
1515 (lambda: getentries(allrevs, 2),
1516 b'retrieve all index entries 2x (forward)'),
1516 b'retrieve all index entries 2x (forward)'),
1517 (lambda: getentries(allrevsrev),
1517 (lambda: getentries(allrevsrev),
1518 b'retrieve all index entries (reverse)'),
1518 b'retrieve all index entries (reverse)'),
1519 (lambda: getentries(allrevsrev, 2),
1519 (lambda: getentries(allrevsrev, 2),
1520 b'retrieve all index entries 2x (reverse)'),
1520 b'retrieve all index entries 2x (reverse)'),
1521 ]
1521 ]
1522
1522
1523 for fn, title in benches:
1523 for fn, title in benches:
1524 timer, fm = gettimer(ui, opts)
1524 timer, fm = gettimer(ui, opts)
1525 timer(fn, title=title)
1525 timer(fn, title=title)
1526 fm.end()
1526 fm.end()
1527
1527
1528 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1528 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1529 [(b'd', b'dist', 100, b'distance between the revisions'),
1529 [(b'd', b'dist', 100, b'distance between the revisions'),
1530 (b's', b'startrev', 0, b'revision to start reading at'),
1530 (b's', b'startrev', 0, b'revision to start reading at'),
1531 (b'', b'reverse', False, b'read in reverse')],
1531 (b'', b'reverse', False, b'read in reverse')],
1532 b'-c|-m|FILE')
1532 b'-c|-m|FILE')
1533 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1533 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1534 **opts):
1534 **opts):
1535 """Benchmark reading a series of revisions from a revlog.
1535 """Benchmark reading a series of revisions from a revlog.
1536
1536
1537 By default, we read every ``-d/--dist`` revision from 0 to tip of
1537 By default, we read every ``-d/--dist`` revision from 0 to tip of
1538 the specified revlog.
1538 the specified revlog.
1539
1539
1540 The start revision can be defined via ``-s/--startrev``.
1540 The start revision can be defined via ``-s/--startrev``.
1541 """
1541 """
1542 opts = _byteskwargs(opts)
1542 opts = _byteskwargs(opts)
1543
1543
1544 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1544 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1545 rllen = getlen(ui)(rl)
1545 rllen = getlen(ui)(rl)
1546
1546
1547 if startrev < 0:
1547 if startrev < 0:
1548 startrev = rllen + startrev
1548 startrev = rllen + startrev
1549
1549
1550 def d():
1550 def d():
1551 rl.clearcaches()
1551 rl.clearcaches()
1552
1552
1553 beginrev = startrev
1553 beginrev = startrev
1554 endrev = rllen
1554 endrev = rllen
1555 dist = opts[b'dist']
1555 dist = opts[b'dist']
1556
1556
1557 if reverse:
1557 if reverse:
1558 beginrev, endrev = endrev - 1, beginrev - 1
1558 beginrev, endrev = endrev - 1, beginrev - 1
1559 dist = -1 * dist
1559 dist = -1 * dist
1560
1560
1561 for x in _xrange(beginrev, endrev, dist):
1561 for x in _xrange(beginrev, endrev, dist):
1562 # Old Mercurial versions don't support passing an int to revision().
1562 # Old Mercurial versions don't support passing an int to revision().
1563 n = rl.node(x)
1563 n = rl.node(x)
1564 rl.revision(n)
1564 rl.revision(n)
1565
1565
1566 timer, fm = gettimer(ui, opts)
1566 timer, fm = gettimer(ui, opts)
1567 timer(d)
1567 timer(d)
1568 fm.end()
1568 fm.end()
1569
1569
1570 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1570 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1571 [(b's', b'startrev', 1000, b'revision to start writing at'),
1571 [(b's', b'startrev', 1000, b'revision to start writing at'),
1572 (b'', b'stoprev', -1, b'last revision to write'),
1572 (b'', b'stoprev', -1, b'last revision to write'),
1573 (b'', b'count', 3, b'number of runs to perform'),
1573 (b'', b'count', 3, b'number of runs to perform'),
1574 (b'', b'details', False, b'print timing for every revisions tested'),
1574 (b'', b'details', False, b'print timing for every revisions tested'),
1575 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
1575 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
1576 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1576 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1577 ],
1577 ],
1578 b'-c|-m|FILE')
1578 b'-c|-m|FILE')
1579 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1579 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1580 """Benchmark writing a series of revisions to a revlog.
1580 """Benchmark writing a series of revisions to a revlog.
1581
1581
1582 Possible source values are:
1582 Possible source values are:
1583 * `full`: add from a full text (default).
1583 * `full`: add from a full text (default).
1584 * `parent-1`: add from a delta to the first parent
1584 * `parent-1`: add from a delta to the first parent
1585 * `parent-2`: add from a delta to the second parent if it exists
1585 * `parent-2`: add from a delta to the second parent if it exists
1586 (use a delta from the first parent otherwise)
1586 (use a delta from the first parent otherwise)
1587 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1587 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1588 * `storage`: add from the existing precomputed deltas
1588 * `storage`: add from the existing precomputed deltas
1589 """
1589 """
1590 opts = _byteskwargs(opts)
1590 opts = _byteskwargs(opts)
1591
1591
1592 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1592 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1593 rllen = getlen(ui)(rl)
1593 rllen = getlen(ui)(rl)
1594 if startrev < 0:
1594 if startrev < 0:
1595 startrev = rllen + startrev
1595 startrev = rllen + startrev
1596 if stoprev < 0:
1596 if stoprev < 0:
1597 stoprev = rllen + stoprev
1597 stoprev = rllen + stoprev
1598
1598
1599 lazydeltabase = opts['lazydeltabase']
1599 lazydeltabase = opts['lazydeltabase']
1600 source = opts['source']
1600 source = opts['source']
1601 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1601 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1602 b'storage')
1602 b'storage')
1603 if source not in validsource:
1603 if source not in validsource:
1604 raise error.Abort('invalid source type: %s' % source)
1604 raise error.Abort('invalid source type: %s' % source)
1605
1605
1606 ### actually gather results
1606 ### actually gather results
1607 count = opts['count']
1607 count = opts['count']
1608 if count <= 0:
1608 if count <= 0:
1609 raise error.Abort('invalid run count: %d' % count)
1609 raise error.Abort('invalid run count: %d' % count)
1610 allresults = []
1610 allresults = []
1611 for c in range(count):
1611 for c in range(count):
1612 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1612 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1613 lazydeltabase=lazydeltabase)
1613 lazydeltabase=lazydeltabase)
1614 allresults.append(timing)
1614 allresults.append(timing)
1615
1615
1616 ### consolidate the results in a single list
1616 ### consolidate the results in a single list
1617 results = []
1617 results = []
1618 for idx, (rev, t) in enumerate(allresults[0]):
1618 for idx, (rev, t) in enumerate(allresults[0]):
1619 ts = [t]
1619 ts = [t]
1620 for other in allresults[1:]:
1620 for other in allresults[1:]:
1621 orev, ot = other[idx]
1621 orev, ot = other[idx]
1622 assert orev == rev
1622 assert orev == rev
1623 ts.append(ot)
1623 ts.append(ot)
1624 results.append((rev, ts))
1624 results.append((rev, ts))
1625 resultcount = len(results)
1625 resultcount = len(results)
1626
1626
1627 ### Compute and display relevant statistics
1627 ### Compute and display relevant statistics
1628
1628
1629 # get a formatter
1629 # get a formatter
1630 fm = ui.formatter(b'perf', opts)
1630 fm = ui.formatter(b'perf', opts)
1631 displayall = ui.configbool(b"perf", b"all-timing", False)
1631 displayall = ui.configbool(b"perf", b"all-timing", False)
1632
1632
1633 # print individual details if requested
1633 # print individual details if requested
1634 if opts['details']:
1634 if opts['details']:
1635 for idx, item in enumerate(results, 1):
1635 for idx, item in enumerate(results, 1):
1636 rev, data = item
1636 rev, data = item
1637 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1637 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1638 formatone(fm, data, title=title, displayall=displayall)
1638 formatone(fm, data, title=title, displayall=displayall)
1639
1639
1640 # sorts results by median time
1640 # sorts results by median time
1641 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1641 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1642 # list of (name, index) entries to display
1642 # list of (name, index) entries to display
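# each index selects a percentile entry from the median-sorted `results`
# above; -1 is the slowest revision measured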
1643 relevants = [
1643 relevants = [
1644 ("min", 0),
1644 ("min", 0),
1645 ("10%", resultcount * 10 // 100),
1645 ("10%", resultcount * 10 // 100),
1646 ("25%", resultcount * 25 // 100),
1646 ("25%", resultcount * 25 // 100),
1647 ("50%", resultcount * 50 // 100),
1647 ("50%", resultcount * 50 // 100),
1648 ("75%", resultcount * 75 // 100),
1648 ("75%", resultcount * 75 // 100),
1649 ("90%", resultcount * 90 // 100),
1649 ("90%", resultcount * 90 // 100),
1650 ("95%", resultcount * 95 // 100),
1650 ("95%", resultcount * 95 // 100),
1651 ("99%", resultcount * 99 // 100),
1651 ("99%", resultcount * 99 // 100),
1652 ("max", -1),
1652 ("max", -1),
1653 ]
1653 ]
1654 if not ui.quiet:
1654 if not ui.quiet:
1655 for name, idx in relevants:
1655 for name, idx in relevants:
1656 data = results[idx]
1656 data = results[idx]
1657 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1657 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1658 formatone(fm, data[1], title=title, displayall=displayall)
1658 formatone(fm, data[1], title=title, displayall=displayall)
1659
1659
1660 # XXX summing that many floats will not be very precise, we ignore this fact
1660 # XXX summing that many floats will not be very precise, we ignore this fact
1661 # for now
1661 # for now
1662 totaltime = []
1662 totaltime = []
1663 for item in allresults:
1663 for item in allresults:
1664 totaltime.append((sum(x[1][0] for x in item),
1664 totaltime.append((sum(x[1][0] for x in item),
1665 sum(x[1][1] for x in item),
1665 sum(x[1][1] for x in item),
1666 sum(x[1][2] for x in item),)
1666 sum(x[1][2] for x in item),)
1667 )
1667 )
1668 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1668 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1669 displayall=displayall)
1669 displayall=displayall)
1670 fm.end()
1670 fm.end()
1671
1671
1672 class _faketr(object):
1672 class _faketr(object):
1673 def add(s, x, y, z=None):
1673 def add(s, x, y, z=None):
1674 return None
1674 return None
1675
1675
1676 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1676 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1677 lazydeltabase=True):
1677 lazydeltabase=True):
1678 timings = []
1678 timings = []
1679 tr = _faketr()
1679 tr = _faketr()
1680 with _temprevlog(ui, orig, startrev) as dest:
1680 with _temprevlog(ui, orig, startrev) as dest:
1681 dest._lazydeltabase = lazydeltabase
1681 dest._lazydeltabase = lazydeltabase
1682 revs = list(orig.revs(startrev, stoprev))
1682 revs = list(orig.revs(startrev, stoprev))
1683 total = len(revs)
1683 total = len(revs)
1684 topic = 'adding'
1684 topic = 'adding'
1685 if runidx is not None:
1685 if runidx is not None:
1686 topic += ' (run #%d)' % runidx
1686 topic += ' (run #%d)' % runidx
1687 for idx, rev in enumerate(revs):
1687 for idx, rev in enumerate(revs):
1688 ui.progress(topic, idx, unit='revs', total=total)
1688 ui.progress(topic, idx, unit='revs', total=total)
1689 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1689 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1690 with timeone() as r:
1690 with timeone() as r:
1691 dest.addrawrevision(*addargs, **addkwargs)
1691 dest.addrawrevision(*addargs, **addkwargs)
1692 timings.append((rev, r[0]))
1692 timings.append((rev, r[0]))
1693 ui.progress(topic, total, unit='revs', total=total)
1693 ui.progress(topic, total, unit='revs', total=total)
1694 ui.progress(topic, None, unit='revs', total=total)
1694 ui.progress(topic, None, unit='revs', total=total)
1695 return timings
1695 return timings
1696
1696
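# Build the (args, kwargs) that addrawrevision() needs to re-add `rev`:
# either a full text or a cached delta against the base chosen by `source`.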
1697 def _getrevisionseed(orig, rev, tr, source):
1697 def _getrevisionseed(orig, rev, tr, source):
1698 from mercurial.node import nullid
1698 from mercurial.node import nullid
1699
1699
1700 linkrev = orig.linkrev(rev)
1700 linkrev = orig.linkrev(rev)
1701 node = orig.node(rev)
1701 node = orig.node(rev)
1702 p1, p2 = orig.parents(node)
1702 p1, p2 = orig.parents(node)
1703 flags = orig.flags(rev)
1703 flags = orig.flags(rev)
1704 cachedelta = None
1704 cachedelta = None
1705 text = None
1705 text = None
1706
1706
1707 if source == b'full':
1707 if source == b'full':
1708 text = orig.revision(rev)
1708 text = orig.revision(rev)
1709 elif source == b'parent-1':
1709 elif source == b'parent-1':
1710 baserev = orig.rev(p1)
1710 baserev = orig.rev(p1)
1711 cachedelta = (baserev, orig.revdiff(p1, rev))
1711 cachedelta = (baserev, orig.revdiff(p1, rev))
1712 elif source == b'parent-2':
1712 elif source == b'parent-2':
1713 parent = p2
1713 parent = p2
1714 if p2 == nullid:
1714 if p2 == nullid:
1715 parent = p1
1715 parent = p1
1716 baserev = orig.rev(parent)
1716 baserev = orig.rev(parent)
1717 cachedelta = (baserev, orig.revdiff(parent, rev))
1717 cachedelta = (baserev, orig.revdiff(parent, rev))
1718 elif source == b'parent-smallest':
1718 elif source == b'parent-smallest':
1719 p1diff = orig.revdiff(p1, rev)
1719 p1diff = orig.revdiff(p1, rev)
1720 parent = p1
1720 parent = p1
1721 diff = p1diff
1721 diff = p1diff
1722 if p2 != nullid:
1722 if p2 != nullid:
1723 p2diff = orig.revdiff(p2, rev)
1723 p2diff = orig.revdiff(p2, rev)
1724 if len(p1diff) > len(p2diff):
1724 if len(p1diff) > len(p2diff):
1725 parent = p2
1725 parent = p2
1726 diff = p2diff
1726 diff = p2diff
1727 baserev = orig.rev(parent)
1727 baserev = orig.rev(parent)
1728 cachedelta = (baserev, diff)
1728 cachedelta = (baserev, diff)
1729 elif source == b'storage':
1729 elif source == b'storage':
1730 baserev = orig.deltaparent(rev)
1730 baserev = orig.deltaparent(rev)
1731 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1731 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1732
1732
1733 return ((text, tr, linkrev, p1, p2),
1733 return ((text, tr, linkrev, p1, p2),
1734 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1734 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1735
1735
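# Context manager yielding a temporary, writable copy of `orig` truncated to
# `truncaterev`; the copy lives in its own directory and is removed on exit.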
1736 @contextlib.contextmanager
1736 @contextlib.contextmanager
1737 def _temprevlog(ui, orig, truncaterev):
1737 def _temprevlog(ui, orig, truncaterev):
1738 from mercurial import vfs as vfsmod
1738 from mercurial import vfs as vfsmod
1739
1739
1740 if orig._inline:
1740 if orig._inline:
1741 raise error.Abort('not supporting inline revlog (yet)')
1741 raise error.Abort('not supporting inline revlog (yet)')
1742
1742
1743 origindexpath = orig.opener.join(orig.indexfile)
1743 origindexpath = orig.opener.join(orig.indexfile)
1744 origdatapath = orig.opener.join(orig.datafile)
1744 origdatapath = orig.opener.join(orig.datafile)
1745 indexname = 'revlog.i'
1745 indexname = 'revlog.i'
1746 dataname = 'revlog.d'
1746 dataname = 'revlog.d'
1747
1747
1748 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1748 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1749 try:
1749 try:
1750 # copy the data file in a temporary directory
1750 # copy the data file in a temporary directory
1751 ui.debug('copying data in %s\n' % tmpdir)
1751 ui.debug('copying data in %s\n' % tmpdir)
1752 destindexpath = os.path.join(tmpdir, 'revlog.i')
1752 destindexpath = os.path.join(tmpdir, 'revlog.i')
1753 destdatapath = os.path.join(tmpdir, 'revlog.d')
1753 destdatapath = os.path.join(tmpdir, 'revlog.d')
1754 shutil.copyfile(origindexpath, destindexpath)
1754 shutil.copyfile(origindexpath, destindexpath)
1755 shutil.copyfile(origdatapath, destdatapath)
1755 shutil.copyfile(origdatapath, destdatapath)
1756
1756
1757 # remove the data we want to add again
1757 # remove the data we want to add again
1758 ui.debug('truncating data to be rewritten\n')
1758 ui.debug('truncating data to be rewritten\n')
1759 with open(destindexpath, 'ab') as index:
1759 with open(destindexpath, 'ab') as index:
1760 index.seek(0)
1760 index.seek(0)
1761 index.truncate(truncaterev * orig._io.size)
1761 index.truncate(truncaterev * orig._io.size)
1762 with open(destdatapath, 'ab') as data:
1762 with open(destdatapath, 'ab') as data:
1763 data.seek(0)
1763 data.seek(0)
1764 data.truncate(orig.start(truncaterev))
1764 data.truncate(orig.start(truncaterev))
1765
1765
1766 # instantiate a new revlog from the temporary copy
1766 # instantiate a new revlog from the temporary copy
1767 ui.debug('instantiating revlog from the truncated copy\n')
1767 ui.debug('instantiating revlog from the truncated copy\n')
1768 vfs = vfsmod.vfs(tmpdir)
1768 vfs = vfsmod.vfs(tmpdir)
1769 vfs.options = getattr(orig.opener, 'options', None)
1769 vfs.options = getattr(orig.opener, 'options', None)
1770
1770
1771 dest = revlog.revlog(vfs,
1771 dest = revlog.revlog(vfs,
1772 indexfile=indexname,
1772 indexfile=indexname,
1773 datafile=dataname)
1773 datafile=dataname)
1774 if dest._inline:
1774 if dest._inline:
1775 raise error.Abort('not supporting inline revlog (yet)')
1775 raise error.Abort('not supporting inline revlog (yet)')
1776 # make sure internals are initialized
1776 # make sure internals are initialized
1777 dest.revision(len(dest) - 1)
1777 dest.revision(len(dest) - 1)
1778 yield dest
1778 yield dest
1779 del dest, vfs
1779 del dest, vfs
1780 finally:
1780 finally:
1781 shutil.rmtree(tmpdir, True)
1781 shutil.rmtree(tmpdir, True)
1782
1782
1783 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1783 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1784 [(b'e', b'engines', b'', b'compression engines to use'),
1784 [(b'e', b'engines', b'', b'compression engines to use'),
1785 (b's', b'startrev', 0, b'revision to start at')],
1785 (b's', b'startrev', 0, b'revision to start at')],
1786 b'-c|-m|FILE')
1786 b'-c|-m|FILE')
1787 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1787 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1788 """Benchmark operations on revlog chunks.
1788 """Benchmark operations on revlog chunks.
1789
1789
1790 Logically, each revlog is a collection of fulltext revisions. However,
1790 Logically, each revlog is a collection of fulltext revisions. However,
1791 stored within each revlog are "chunks" of possibly compressed data. This
1791 stored within each revlog are "chunks" of possibly compressed data. This
1792 data needs to be read and decompressed or compressed and written.
1792 data needs to be read and decompressed or compressed and written.
1793
1793
1794 This command measures the time it takes to read+decompress and recompress
1794 This command measures the time it takes to read+decompress and recompress
1795 chunks in a revlog. It effectively isolates I/O and compression performance.
1795 chunks in a revlog. It effectively isolates I/O and compression performance.
1796 For measurements of higher-level operations like resolving revisions,
1796 For measurements of higher-level operations like resolving revisions,
1797 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1797 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1798 """
1798 """
1799 opts = _byteskwargs(opts)
1799 opts = _byteskwargs(opts)
1800
1800
1801 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1801 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1802
1802
1803 # _chunkraw was renamed to _getsegmentforrevs.
1803 # _chunkraw was renamed to _getsegmentforrevs.
1804 try:
1804 try:
1805 segmentforrevs = rl._getsegmentforrevs
1805 segmentforrevs = rl._getsegmentforrevs
1806 except AttributeError:
1806 except AttributeError:
1807 segmentforrevs = rl._chunkraw
1807 segmentforrevs = rl._chunkraw
1808
1808
1809 # Verify engines argument.
1809 # Verify engines argument.
1810 if engines:
1810 if engines:
1811 engines = set(e.strip() for e in engines.split(b','))
1811 engines = set(e.strip() for e in engines.split(b','))
1812 for engine in engines:
1812 for engine in engines:
1813 try:
1813 try:
1814 util.compressionengines[engine]
1814 util.compressionengines[engine]
1815 except KeyError:
1815 except KeyError:
1816 raise error.Abort(b'unknown compression engine: %s' % engine)
1816 raise error.Abort(b'unknown compression engine: %s' % engine)
1817 else:
1817 else:
1818 engines = []
1818 engines = []
1819 for e in util.compengines:
1819 for e in util.compengines:
1820 engine = util.compengines[e]
1820 engine = util.compengines[e]
1821 try:
1821 try:
1822 if engine.available():
1822 if engine.available():
1823 engine.revlogcompressor().compress(b'dummy')
1823 engine.revlogcompressor().compress(b'dummy')
1824 engines.append(e)
1824 engines.append(e)
1825 except NotImplementedError:
1825 except NotImplementedError:
1826 pass
1826 pass
1827
1827
1828 revs = list(rl.revs(startrev, len(rl) - 1))
1828 revs = list(rl.revs(startrev, len(rl) - 1))
1829
1829
1830 def rlfh(rl):
1830 def rlfh(rl):
1831 if rl._inline:
1831 if rl._inline:
1832 return getsvfs(repo)(rl.indexfile)
1832 return getsvfs(repo)(rl.indexfile)
1833 else:
1833 else:
1834 return getsvfs(repo)(rl.datafile)
1834 return getsvfs(repo)(rl.datafile)
1835
1835
1836 def doread():
1836 def doread():
1837 rl.clearcaches()
1837 rl.clearcaches()
1838 for rev in revs:
1838 for rev in revs:
1839 segmentforrevs(rev, rev)
1839 segmentforrevs(rev, rev)
1840
1840
1841 def doreadcachedfh():
1841 def doreadcachedfh():
1842 rl.clearcaches()
1842 rl.clearcaches()
1843 fh = rlfh(rl)
1843 fh = rlfh(rl)
1844 for rev in revs:
1844 for rev in revs:
1845 segmentforrevs(rev, rev, df=fh)
1845 segmentforrevs(rev, rev, df=fh)
1846
1846
1847 def doreadbatch():
1847 def doreadbatch():
1848 rl.clearcaches()
1848 rl.clearcaches()
1849 segmentforrevs(revs[0], revs[-1])
1849 segmentforrevs(revs[0], revs[-1])
1850
1850
1851 def doreadbatchcachedfh():
1851 def doreadbatchcachedfh():
1852 rl.clearcaches()
1852 rl.clearcaches()
1853 fh = rlfh(rl)
1853 fh = rlfh(rl)
1854 segmentforrevs(revs[0], revs[-1], df=fh)
1854 segmentforrevs(revs[0], revs[-1], df=fh)
1855
1855
1856 def dochunk():
1856 def dochunk():
1857 rl.clearcaches()
1857 rl.clearcaches()
1858 fh = rlfh(rl)
1858 fh = rlfh(rl)
1859 for rev in revs:
1859 for rev in revs:
1860 rl._chunk(rev, df=fh)
1860 rl._chunk(rev, df=fh)
1861
1861
1862 chunks = [None]
1862 chunks = [None]
1863
1863
1864 def dochunkbatch():
1864 def dochunkbatch():
1865 rl.clearcaches()
1865 rl.clearcaches()
1866 fh = rlfh(rl)
1866 fh = rlfh(rl)
1867 # Save chunks as a side-effect.
1867 # Save chunks as a side-effect.
1868 chunks[0] = rl._chunks(revs, df=fh)
1868 chunks[0] = rl._chunks(revs, df=fh)
1869
1869
1870 def docompress(compressor):
1870 def docompress(compressor):
1871 rl.clearcaches()
1871 rl.clearcaches()
1872
1872
1873 try:
1873 try:
1874 # Swap in the requested compression engine.
1874 # Swap in the requested compression engine.
1875 oldcompressor = rl._compressor
1875 oldcompressor = rl._compressor
1876 rl._compressor = compressor
1876 rl._compressor = compressor
1877 for chunk in chunks[0]:
1877 for chunk in chunks[0]:
1878 rl.compress(chunk)
1878 rl.compress(chunk)
1879 finally:
1879 finally:
1880 rl._compressor = oldcompressor
1880 rl._compressor = oldcompressor
1881
1881
1882 benches = [
1882 benches = [
1883 (lambda: doread(), b'read'),
1883 (lambda: doread(), b'read'),
1884 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1884 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1885 (lambda: doreadbatch(), b'read batch'),
1885 (lambda: doreadbatch(), b'read batch'),
1886 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1886 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1887 (lambda: dochunk(), b'chunk'),
1887 (lambda: dochunk(), b'chunk'),
1888 (lambda: dochunkbatch(), b'chunk batch'),
1888 (lambda: dochunkbatch(), b'chunk batch'),
1889 ]
1889 ]
1890
1890
1891 for engine in sorted(engines):
1891 for engine in sorted(engines):
1892 compressor = util.compengines[engine].revlogcompressor()
1892 compressor = util.compengines[engine].revlogcompressor()
1893 benches.append((functools.partial(docompress, compressor),
1893 benches.append((functools.partial(docompress, compressor),
1894 b'compress w/ %s' % engine))
1894 b'compress w/ %s' % engine))
1895
1895
1896 for fn, title in benches:
1896 for fn, title in benches:
1897 timer, fm = gettimer(ui, opts)
1897 timer, fm = gettimer(ui, opts)
1898 timer(fn, title=title)
1898 timer(fn, title=title)
1899 fm.end()
1899 fm.end()
1900
1900
1901 @command(b'perfrevlogrevision', revlogopts + formatteropts +
1901 @command(b'perfrevlogrevision', revlogopts + formatteropts +
1902 [(b'', b'cache', False, b'use caches instead of clearing')],
1902 [(b'', b'cache', False, b'use caches instead of clearing')],
1903 b'-c|-m|FILE REV')
1903 b'-c|-m|FILE REV')
1904 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1904 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1905 """Benchmark obtaining a revlog revision.
1905 """Benchmark obtaining a revlog revision.
1906
1906
1907 Obtaining a revlog revision consists of roughly the following steps:
1907 Obtaining a revlog revision consists of roughly the following steps:
1908
1908
1909 1. Compute the delta chain
1909 1. Compute the delta chain
1910 2. Slice the delta chain if applicable
1910 2. Slice the delta chain if applicable
1911 3. Obtain the raw chunks for that delta chain
1911 3. Obtain the raw chunks for that delta chain
1912 4. Decompress each raw chunk
1912 4. Decompress each raw chunk
1913 5. Apply binary patches to obtain fulltext
1913 5. Apply binary patches to obtain fulltext
1914 6. Verify hash of fulltext
1914 6. Verify hash of fulltext
1915
1915
1916 This command measures the time spent in each of these phases.
1916 This command measures the time spent in each of these phases.
1917 """
1917 """
1918 opts = _byteskwargs(opts)
1918 opts = _byteskwargs(opts)
1919
1919
1920 if opts.get(b'changelog') or opts.get(b'manifest'):
1920 if opts.get(b'changelog') or opts.get(b'manifest'):
1921 file_, rev = None, file_
1921 file_, rev = None, file_
1922 elif rev is None:
1922 elif rev is None:
1923 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
1923 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
1924
1924
1925 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
1925 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
1926
1926
1927 # _chunkraw was renamed to _getsegmentforrevs.
1927 # _chunkraw was renamed to _getsegmentforrevs.
1928 try:
1928 try:
1929 segmentforrevs = r._getsegmentforrevs
1929 segmentforrevs = r._getsegmentforrevs
1930 except AttributeError:
1930 except AttributeError:
1931 segmentforrevs = r._chunkraw
1931 segmentforrevs = r._chunkraw
1932
1932
1933 node = r.lookup(rev)
1933 node = r.lookup(rev)
1934 rev = r.rev(node)
1934 rev = r.rev(node)
1935
1935
1936 def getrawchunks(data, chain):
1936 def getrawchunks(data, chain):
1937 start = r.start
1937 start = r.start
1938 length = r.length
1938 length = r.length
1939 inline = r._inline
1939 inline = r._inline
1940 iosize = r._io.size
1940 iosize = r._io.size
1941 buffer = util.buffer
1941 buffer = util.buffer
1942
1942
1943 chunks = []
1943 chunks = []
1944 ladd = chunks.append
1944 ladd = chunks.append
1945 for idx, item in enumerate(chain):
1945 for idx, item in enumerate(chain):
1946 offset = start(item[0])
1946 offset = start(item[0])
1947 bits = data[idx]
1947 bits = data[idx]
1948 for rev in item:
1948 for rev in item:
1949 chunkstart = start(rev)
1949 chunkstart = start(rev)
1950 if inline:
1950 if inline:
1951 chunkstart += (rev + 1) * iosize
1951 chunkstart += (rev + 1) * iosize
1952 chunklength = length(rev)
1952 chunklength = length(rev)
1953 ladd(buffer(bits, chunkstart - offset, chunklength))
1953 ladd(buffer(bits, chunkstart - offset, chunklength))
1954
1954
1955 return chunks
1955 return chunks
1956
1956
1957 def dodeltachain(rev):
1957 def dodeltachain(rev):
1958 if not cache:
1958 if not cache:
1959 r.clearcaches()
1959 r.clearcaches()
1960 r._deltachain(rev)
1960 r._deltachain(rev)
1961
1961
1962 def doread(chain):
1962 def doread(chain):
1963 if not cache:
1963 if not cache:
1964 r.clearcaches()
1964 r.clearcaches()
1965 for item in slicedchain:
1965 for item in slicedchain:
1966 segmentforrevs(item[0], item[-1])
1966 segmentforrevs(item[0], item[-1])
1967
1967
1968 def doslice(r, chain, size):
1968 def doslice(r, chain, size):
1969 for s in slicechunk(r, chain, targetsize=size):
1969 for s in slicechunk(r, chain, targetsize=size):
1970 pass
1970 pass
1971
1971
1972 def dorawchunks(data, chain):
1972 def dorawchunks(data, chain):
1973 if not cache:
1973 if not cache:
1974 r.clearcaches()
1974 r.clearcaches()
1975 getrawchunks(data, chain)
1975 getrawchunks(data, chain)
1976
1976
1977 def dodecompress(chunks):
1977 def dodecompress(chunks):
1978 decomp = r.decompress
1978 decomp = r.decompress
1979 for chunk in chunks:
1979 for chunk in chunks:
1980 decomp(chunk)
1980 decomp(chunk)
1981
1981
1982 def dopatch(text, bins):
1982 def dopatch(text, bins):
1983 if not cache:
1983 if not cache:
1984 r.clearcaches()
1984 r.clearcaches()
1985 mdiff.patches(text, bins)
1985 mdiff.patches(text, bins)
1986
1986
1987 def dohash(text):
1987 def dohash(text):
1988 if not cache:
1988 if not cache:
1989 r.clearcaches()
1989 r.clearcaches()
1990 r.checkhash(text, node, rev=rev)
1990 r.checkhash(text, node, rev=rev)
1991
1991
1992 def dorevision():
1992 def dorevision():
1993 if not cache:
1993 if not cache:
1994 r.clearcaches()
1994 r.clearcaches()
1995 r.revision(node)
1995 r.revision(node)
1996
1996
1997 try:
1997 try:
1998 from mercurial.revlogutils.deltas import slicechunk
1998 from mercurial.revlogutils.deltas import slicechunk
1999 except ImportError:
1999 except ImportError:
2000 slicechunk = getattr(revlog, '_slicechunk', None)
2000 slicechunk = getattr(revlog, '_slicechunk', None)
2001
2001
2002 size = r.length(rev)
2002 size = r.length(rev)
2003 chain = r._deltachain(rev)[0]
2003 chain = r._deltachain(rev)[0]
2004 if not getattr(r, '_withsparseread', False):
2004 if not getattr(r, '_withsparseread', False):
2005 slicedchain = (chain,)
2005 slicedchain = (chain,)
2006 else:
2006 else:
2007 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2007 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2008 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2008 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2009 rawchunks = getrawchunks(data, slicedchain)
2009 rawchunks = getrawchunks(data, slicedchain)
2010 bins = r._chunks(chain)
2010 bins = r._chunks(chain)
2011 text = bytes(bins[0])
2011 text = bytes(bins[0])
2012 bins = bins[1:]
2012 bins = bins[1:]
2013 text = mdiff.patches(text, bins)
2013 text = mdiff.patches(text, bins)
2014
2014
2015 benches = [
2015 benches = [
2016 (lambda: dorevision(), b'full'),
2016 (lambda: dorevision(), b'full'),
2017 (lambda: dodeltachain(rev), b'deltachain'),
2017 (lambda: dodeltachain(rev), b'deltachain'),
2018 (lambda: doread(chain), b'read'),
2018 (lambda: doread(chain), b'read'),
2019 ]
2019 ]
2020
2020
2021 if getattr(r, '_withsparseread', False):
2021 if getattr(r, '_withsparseread', False):
2022 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2022 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2023 benches.append(slicing)
2023 benches.append(slicing)
2024
2024
2025 benches.extend([
2025 benches.extend([
2026 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2026 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2027 (lambda: dodecompress(rawchunks), b'decompress'),
2027 (lambda: dodecompress(rawchunks), b'decompress'),
2028 (lambda: dopatch(text, bins), b'patch'),
2028 (lambda: dopatch(text, bins), b'patch'),
2029 (lambda: dohash(text), b'hash'),
2029 (lambda: dohash(text), b'hash'),
2030 ])
2030 ])
2031
2031
2032 timer, fm = gettimer(ui, opts)
2032 timer, fm = gettimer(ui, opts)
2033 for fn, title in benches:
2033 for fn, title in benches:
2034 timer(fn, title=title)
2034 timer(fn, title=title)
2035 fm.end()
2035 fm.end()
2036
2036
2037 @command(b'perfrevset',
2037 @command(b'perfrevset',
2038 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2038 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2039 (b'', b'contexts', False, b'obtain changectx for each revision')]
2039 (b'', b'contexts', False, b'obtain changectx for each revision')]
2040 + formatteropts, b"REVSET")
2040 + formatteropts, b"REVSET")
2041 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2041 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2042 """benchmark the execution time of a revset
2042 """benchmark the execution time of a revset
2043
2043
2044 Use the --clear option if you need to evaluate the impact of building the
2044 Use the --clear option if you need to evaluate the impact of building the
2045 volatile revision set caches on revset execution. The volatile caches hold
2045 volatile revision set caches on revset execution. The volatile caches hold
2046 filtering- and obsolescence-related data."""
2046 filtering- and obsolescence-related data."""
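# Example invocations (the revset expressions are arbitrary examples):
#
#   hg perfrevset 'all()'
#   hg perfrevset 'draft() and not obsolete()' --clear --contexts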
2047 opts = _byteskwargs(opts)
2047 opts = _byteskwargs(opts)
2048
2048
2049 timer, fm = gettimer(ui, opts)
2049 timer, fm = gettimer(ui, opts)
2050 def d():
2050 def d():
2051 if clear:
2051 if clear:
2052 repo.invalidatevolatilesets()
2052 repo.invalidatevolatilesets()
2053 if contexts:
2053 if contexts:
2054 for ctx in repo.set(expr): pass
2054 for ctx in repo.set(expr): pass
2055 else:
2055 else:
2056 for r in repo.revs(expr): pass
2056 for r in repo.revs(expr): pass
2057 timer(d)
2057 timer(d)
2058 fm.end()
2058 fm.end()
2059
2059
2060 @command(b'perfvolatilesets',
2060 @command(b'perfvolatilesets',
2061 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2061 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2062 ] + formatteropts)
2062 ] + formatteropts)
2063 def perfvolatilesets(ui, repo, *names, **opts):
2063 def perfvolatilesets(ui, repo, *names, **opts):
2064 """benchmark the computation of various volatile set
2064 """benchmark the computation of various volatile set
2065
2065
2066 Volatile set computes element related to filtering and obsolescence."""
2066 Volatile set computes element related to filtering and obsolescence."""
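# Example invocations ('obsolete' is one volatile set name; the available
# names vary between Mercurial versions):
#
#   hg perfvolatilesets
#   hg perfvolatilesets obsolete --clear-obsstore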
2067 opts = _byteskwargs(opts)
2067 opts = _byteskwargs(opts)
2068 timer, fm = gettimer(ui, opts)
2068 timer, fm = gettimer(ui, opts)
2069 repo = repo.unfiltered()
2069 repo = repo.unfiltered()
2070
2070
2071 def getobs(name):
2071 def getobs(name):
2072 def d():
2072 def d():
2073 repo.invalidatevolatilesets()
2073 repo.invalidatevolatilesets()
2074 if opts[b'clear_obsstore']:
2074 if opts[b'clear_obsstore']:
2075 clearfilecache(repo, b'obsstore')
2075 clearfilecache(repo, b'obsstore')
2076 obsolete.getrevs(repo, name)
2076 obsolete.getrevs(repo, name)
2077 return d
2077 return d
2078
2078
2079 allobs = sorted(obsolete.cachefuncs)
2079 allobs = sorted(obsolete.cachefuncs)
2080 if names:
2080 if names:
2081 allobs = [n for n in allobs if n in names]
2081 allobs = [n for n in allobs if n in names]
2082
2082
2083 for name in allobs:
2083 for name in allobs:
2084 timer(getobs(name), title=name)
2084 timer(getobs(name), title=name)
2085
2085
2086 def getfiltered(name):
2086 def getfiltered(name):
2087 def d():
2087 def d():
2088 repo.invalidatevolatilesets()
2088 repo.invalidatevolatilesets()
2089 if opts[b'clear_obsstore']:
2089 if opts[b'clear_obsstore']:
2090 clearfilecache(repo, b'obsstore')
2090 clearfilecache(repo, b'obsstore')
2091 repoview.filterrevs(repo, name)
2091 repoview.filterrevs(repo, name)
2092 return d
2092 return d
2093
2093
2094 allfilter = sorted(repoview.filtertable)
2094 allfilter = sorted(repoview.filtertable)
2095 if names:
2095 if names:
2096 allfilter = [n for n in allfilter if n in names]
2096 allfilter = [n for n in allfilter if n in names]
2097
2097
2098 for name in allfilter:
2098 for name in allfilter:
2099 timer(getfiltered(name), title=name)
2099 timer(getfiltered(name), title=name)
2100 fm.end()
2100 fm.end()
2101
2101
2102 @command(b'perfbranchmap',
2102 @command(b'perfbranchmap',
2103 [(b'f', b'full', False,
2103 [(b'f', b'full', False,
2104 b'include the build time of subsets'),
2104 b'include the build time of subsets'),
2105 (b'', b'clear-revbranch', False,
2105 (b'', b'clear-revbranch', False,
2106 b'purge the revbranch cache between computation'),
2106 b'purge the revbranch cache between computation'),
2107 ] + formatteropts)
2107 ] + formatteropts)
2108 def perfbranchmap(ui, repo, *filternames, **opts):
2108 def perfbranchmap(ui, repo, *filternames, **opts):
2109 """benchmark the update of a branchmap
2109 """benchmark the update of a branchmap
2110
2110
2111 This benchmarks the full repo.branchmap() call with read and write disabled
2111 This benchmarks the full repo.branchmap() call with read and write disabled
2112 """
2112 """
2113 opts = _byteskwargs(opts)
2113 opts = _byteskwargs(opts)
2114 full = opts.get(b"full", False)
2114 full = opts.get(b"full", False)
2115 clear_revbranch = opts.get(b"clear_revbranch", False)
2115 clear_revbranch = opts.get(b"clear_revbranch", False)
2116 timer, fm = gettimer(ui, opts)
2116 timer, fm = gettimer(ui, opts)
2117 def getbranchmap(filtername):
2117 def getbranchmap(filtername):
2118 """generate a benchmark function for the filtername"""
2118 """generate a benchmark function for the filtername"""
2119 if filtername is None:
2119 if filtername is None:
2120 view = repo
2120 view = repo
2121 else:
2121 else:
2122 view = repo.filtered(filtername)
2122 view = repo.filtered(filtername)
2123 def d():
2123 def d():
2124 if clear_revbranch:
2124 if clear_revbranch:
2125 repo.revbranchcache()._clear()
2125 repo.revbranchcache()._clear()
2126 if full:
2126 if full:
2127 view._branchcaches.clear()
2127 view._branchcaches.clear()
2128 else:
2128 else:
2129 view._branchcaches.pop(filtername, None)
2129 view._branchcaches.pop(filtername, None)
2130 view.branchmap()
2130 view.branchmap()
2131 return d
2131 return d
2132 # add filters from the smaller subsets to the bigger ones
2132 # add filters from the smaller subsets to the bigger ones
2133 possiblefilters = set(repoview.filtertable)
2133 possiblefilters = set(repoview.filtertable)
2134 if filternames:
2134 if filternames:
2135 possiblefilters &= set(filternames)
2135 possiblefilters &= set(filternames)
2136 subsettable = getbranchmapsubsettable()
2136 subsettable = getbranchmapsubsettable()
2137 allfilters = []
2137 allfilters = []
2138 while possiblefilters:
2138 while possiblefilters:
2139 for name in possiblefilters:
2139 for name in possiblefilters:
2140 subset = subsettable.get(name)
2140 subset = subsettable.get(name)
2141 if subset not in possiblefilters:
2141 if subset not in possiblefilters:
2142 break
2142 break
2143 else:
2143 else:
2144 assert False, b'subset cycle %s!' % possiblefilters
2144 assert False, b'subset cycle %s!' % possiblefilters
2145 allfilters.append(name)
2145 allfilters.append(name)
2146 possiblefilters.remove(name)
2146 possiblefilters.remove(name)
2147
2147
2148 # warm the cache
2148 # warm the cache
2149 if not full:
2149 if not full:
2150 for name in allfilters:
2150 for name in allfilters:
2151 repo.filtered(name).branchmap()
2151 repo.filtered(name).branchmap()
2152 if not filternames or b'unfiltered' in filternames:
2152 if not filternames or b'unfiltered' in filternames:
2153 # add unfiltered
2153 # add unfiltered
2154 allfilters.append(None)
2154 allfilters.append(None)
2155
2155
2156 branchcacheread = safeattrsetter(branchmap, b'read')
2156 branchcacheread = safeattrsetter(branchmap, b'read')
2157 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2157 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2158 branchcacheread.set(lambda repo: None)
2158 branchcacheread.set(lambda repo: None)
2159 branchcachewrite.set(lambda bc, repo: None)
2159 branchcachewrite.set(lambda bc, repo: None)
2160 try:
2160 try:
2161 for name in allfilters:
2161 for name in allfilters:
2162 printname = name
2162 printname = name
2163 if name is None:
2163 if name is None:
2164 printname = b'unfiltered'
2164 printname = b'unfiltered'
2165 timer(getbranchmap(name), title=str(printname))
2165 timer(getbranchmap(name), title=str(printname))
2166 finally:
2166 finally:
2167 branchcacheread.restore()
2167 branchcacheread.restore()
2168 branchcachewrite.restore()
2168 branchcachewrite.restore()
2169 fm.end()
2169 fm.end()
2170
2170
2171 @command(b'perfbranchmapload', [
2171 @command(b'perfbranchmapload', [
2172 (b'f', b'filter', b'', b'Specify repoview filter'),
2172 (b'f', b'filter', b'', b'Specify repoview filter'),
2173 (b'', b'list', False, b'List branchmap filter caches'),
2173 (b'', b'list', False, b'List branchmap filter caches'),
2174 ] + formatteropts)
2174 ] + formatteropts)
2175 def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
2175 def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
2176 """benchmark reading the branchmap"""
2176 """benchmark reading the branchmap"""
2177 opts = _byteskwargs(opts)
2177 opts = _byteskwargs(opts)
2178
2178
2179 if list:
2179 if list:
2180 for name, kind, st in repo.cachevfs.readdir(stat=True):
2180 for name, kind, st in repo.cachevfs.readdir(stat=True):
2181 if name.startswith(b'branch2'):
2181 if name.startswith(b'branch2'):
2182 filtername = name.partition(b'-')[2] or b'unfiltered'
2182 filtername = name.partition(b'-')[2] or b'unfiltered'
2183 ui.status(b'%s - %s\n'
2183 ui.status(b'%s - %s\n'
2184 % (filtername, util.bytecount(st.st_size)))
2184 % (filtername, util.bytecount(st.st_size)))
2185 return
2185 return
2186 if filter:
2186 if filter:
2187 repo = repoview.repoview(repo, filter)
2187 repo = repoview.repoview(repo, filter)
2188 else:
2188 else:
2189 repo = repo.unfiltered()
2189 repo = repo.unfiltered()
2190 # try once without the timer; the filter may not be cached
2190 # try once without the timer; the filter may not be cached
2191 if branchmap.read(repo) is None:
2191 if branchmap.read(repo) is None:
2192 raise error.Abort(b'No branchmap cached for %s repo'
2192 raise error.Abort(b'No branchmap cached for %s repo'
2193 % (filter or b'unfiltered'))
2193 % (filter or b'unfiltered'))
2194 timer, fm = gettimer(ui, opts)
2194 timer, fm = gettimer(ui, opts)
2195 timer(lambda: branchmap.read(repo) and None)
2195 timer(lambda: branchmap.read(repo) and None)
2196 fm.end()
2196 fm.end()
2197
2197
2198 @command(b'perfloadmarkers')
2198 @command(b'perfloadmarkers')
2199 def perfloadmarkers(ui, repo):
2199 def perfloadmarkers(ui, repo):
2200 """benchmark the time to parse the on-disk markers for a repo
2200 """benchmark the time to parse the on-disk markers for a repo
2201
2201
2202 Result is the number of markers in the repo."""
2202 Result is the number of markers in the repo."""
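# Example invocation:
#
#   hg perfloadmarkers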
2203 timer, fm = gettimer(ui)
2203 timer, fm = gettimer(ui)
2204 svfs = getsvfs(repo)
2204 svfs = getsvfs(repo)
2205 timer(lambda: len(obsolete.obsstore(svfs)))
2205 timer(lambda: len(obsolete.obsstore(svfs)))
2206 fm.end()
2206 fm.end()
2207
2207
2208 @command(b'perflrucachedict', formatteropts +
2208 @command(b'perflrucachedict', formatteropts +
2209 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2209 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2210 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2210 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2211 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2211 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2212 (b'', b'size', 4, b'size of cache'),
2212 (b'', b'size', 4, b'size of cache'),
2213 (b'', b'gets', 10000, b'number of key lookups'),
2213 (b'', b'gets', 10000, b'number of key lookups'),
2214 (b'', b'sets', 10000, b'number of key sets'),
2214 (b'', b'sets', 10000, b'number of key sets'),
2215 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2215 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2216 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2216 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2217 norepo=True)
2217 norepo=True)
2218 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2218 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2219 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2219 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
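# Example invocations (illustrative parameter values; --costlimit switches
# the benchmark to the cost-aware insert/get variants):
#
#   hg perflrucachedict --size 4 --gets 10000 --sets 10000
#   hg perflrucachedict --costlimit 500 --mincost 1 --maxcost 100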
2220 opts = _byteskwargs(opts)
2220 opts = _byteskwargs(opts)
2221
2221
2222 def doinit():
2222 def doinit():
2223 for i in _xrange(10000):
2223 for i in _xrange(10000):
2224 util.lrucachedict(size)
2224 util.lrucachedict(size)
2225
2225
2226 costrange = list(range(mincost, maxcost + 1))
2226 costrange = list(range(mincost, maxcost + 1))
2227
2227
2228 values = []
2228 values = []
2229 for i in _xrange(size):
2229 for i in _xrange(size):
2230 values.append(random.randint(0, _maxint))
2230 values.append(random.randint(0, _maxint))
2231
2231
2232 # Get mode fills the cache and tests raw lookup performance with no
2232 # Get mode fills the cache and tests raw lookup performance with no
2233 # eviction.
2233 # eviction.
2234 getseq = []
2234 getseq = []
2235 for i in _xrange(gets):
2235 for i in _xrange(gets):
2236 getseq.append(random.choice(values))
2236 getseq.append(random.choice(values))
2237
2237
2238 def dogets():
2238 def dogets():
2239 d = util.lrucachedict(size)
2239 d = util.lrucachedict(size)
2240 for v in values:
2240 for v in values:
2241 d[v] = v
2241 d[v] = v
2242 for key in getseq:
2242 for key in getseq:
2243 value = d[key]
2243 value = d[key]
2244 value # silence pyflakes warning
2244 value # silence pyflakes warning
2245
2245
2246 def dogetscost():
2246 def dogetscost():
2247 d = util.lrucachedict(size, maxcost=costlimit)
2247 d = util.lrucachedict(size, maxcost=costlimit)
2248 for i, v in enumerate(values):
2248 for i, v in enumerate(values):
2249 d.insert(v, v, cost=costs[i])
2249 d.insert(v, v, cost=costs[i])
2250 for key in getseq:
2250 for key in getseq:
2251 try:
2251 try:
2252 value = d[key]
2252 value = d[key]
2253 value # silence pyflakes warning
2253 value # silence pyflakes warning
2254 except KeyError:
2254 except KeyError:
2255 pass
2255 pass
2256
2256
2257 # Set mode tests insertion speed with cache eviction.
2257 # Set mode tests insertion speed with cache eviction.
2258 setseq = []
2258 setseq = []
2259 costs = []
2259 costs = []
2260 for i in _xrange(sets):
2260 for i in _xrange(sets):
2261 setseq.append(random.randint(0, _maxint))
2261 setseq.append(random.randint(0, _maxint))
2262 costs.append(random.choice(costrange))
2262 costs.append(random.choice(costrange))
2263
2263
2264 def doinserts():
2264 def doinserts():
2265 d = util.lrucachedict(size)
2265 d = util.lrucachedict(size)
2266 for v in setseq:
2266 for v in setseq:
2267 d.insert(v, v)
2267 d.insert(v, v)
2268
2268
2269 def doinsertscost():
2269 def doinsertscost():
2270 d = util.lrucachedict(size, maxcost=costlimit)
2270 d = util.lrucachedict(size, maxcost=costlimit)
2271 for i, v in enumerate(setseq):
2271 for i, v in enumerate(setseq):
2272 d.insert(v, v, cost=costs[i])
2272 d.insert(v, v, cost=costs[i])
2273
2273
2274 def dosets():
2274 def dosets():
2275 d = util.lrucachedict(size)
2275 d = util.lrucachedict(size)
2276 for v in setseq:
2276 for v in setseq:
2277 d[v] = v
2277 d[v] = v
2278
2278
2279 # Mixed mode randomly performs gets and sets with eviction.
2279 # Mixed mode randomly performs gets and sets with eviction.
2280 mixedops = []
2280 mixedops = []
2281 for i in _xrange(mixed):
2281 for i in _xrange(mixed):
2282 r = random.randint(0, 100)
2282 r = random.randint(0, 100)
2283 if r < mixedgetfreq:
2283 if r < mixedgetfreq:
2284 op = 0
2284 op = 0
2285 else:
2285 else:
2286 op = 1
2286 op = 1
2287
2287
2288 mixedops.append((op,
2288 mixedops.append((op,
2289 random.randint(0, size * 2),
2289 random.randint(0, size * 2),
2290 random.choice(costrange)))
2290 random.choice(costrange)))
2291
2291
2292 def domixed():
2292 def domixed():
2293 d = util.lrucachedict(size)
2293 d = util.lrucachedict(size)
2294
2294
2295 for op, v, cost in mixedops:
2295 for op, v, cost in mixedops:
2296 if op == 0:
2296 if op == 0:
2297 try:
2297 try:
2298 d[v]
2298 d[v]
2299 except KeyError:
2299 except KeyError:
2300 pass
2300 pass
2301 else:
2301 else:
2302 d[v] = v
2302 d[v] = v
2303
2303
2304 def domixedcost():
2304 def domixedcost():
2305 d = util.lrucachedict(size, maxcost=costlimit)
2305 d = util.lrucachedict(size, maxcost=costlimit)
2306
2306
2307 for op, v, cost in mixedops:
2307 for op, v, cost in mixedops:
2308 if op == 0:
2308 if op == 0:
2309 try:
2309 try:
2310 d[v]
2310 d[v]
2311 except KeyError:
2311 except KeyError:
2312 pass
2312 pass
2313 else:
2313 else:
2314 d.insert(v, v, cost=cost)
2314 d.insert(v, v, cost=cost)
2315
2315
2316 benches = [
2316 benches = [
2317 (doinit, b'init'),
2317 (doinit, b'init'),
2318 ]
2318 ]
2319
2319
2320 if costlimit:
2320 if costlimit:
2321 benches.extend([
2321 benches.extend([
2322 (dogetscost, b'gets w/ cost limit'),
2322 (dogetscost, b'gets w/ cost limit'),
2323 (doinsertscost, b'inserts w/ cost limit'),
2323 (doinsertscost, b'inserts w/ cost limit'),
2324 (domixedcost, b'mixed w/ cost limit'),
2324 (domixedcost, b'mixed w/ cost limit'),
2325 ])
2325 ])
2326 else:
2326 else:
2327 benches.extend([
2327 benches.extend([
2328 (dogets, b'gets'),
2328 (dogets, b'gets'),
2329 (doinserts, b'inserts'),
2329 (doinserts, b'inserts'),
2330 (dosets, b'sets'),
2330 (dosets, b'sets'),
2331 (domixed, b'mixed')
2331 (domixed, b'mixed')
2332 ])
2332 ])
2333
2333
2334 for fn, title in benches:
2334 for fn, title in benches:
2335 timer, fm = gettimer(ui, opts)
2335 timer, fm = gettimer(ui, opts)
2336 timer(fn, title=title)
2336 timer(fn, title=title)
2337 fm.end()
2337 fm.end()
2338
2338
2339 @command(b'perfwrite', formatteropts)
2339 @command(b'perfwrite', formatteropts)
2340 def perfwrite(ui, repo, **opts):
2340 def perfwrite(ui, repo, **opts):
2341 """microbenchmark ui.write
2341 """microbenchmark ui.write
2342 """
2342 """
2343 opts = _byteskwargs(opts)
2343 opts = _byteskwargs(opts)
2344
2344
2345 timer, fm = gettimer(ui, opts)
2345 timer, fm = gettimer(ui, opts)
2346 def write():
2346 def write():
2347 for i in range(100000):
2347 for i in range(100000):
2348 ui.write((b'Testing write performance\n'))
2348 ui.write((b'Testing write performance\n'))
2349 timer(write)
2349 timer(write)
2350 fm.end()
2350 fm.end()
2351
2351
2352 def uisetup(ui):
2352 def uisetup(ui):
2353 if (util.safehasattr(cmdutil, b'openrevlog') and
2353 if (util.safehasattr(cmdutil, b'openrevlog') and
2354 not util.safehasattr(commands, b'debugrevlogopts')):
2354 not util.safehasattr(commands, b'debugrevlogopts')):
2355 # for "historical portability":
2355 # for "historical portability":
2356 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2356 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2357 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2357 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2358 # openrevlog() should cause failure, because it has been
2358 # openrevlog() should cause failure, because it has been
2359 # available since 3.5 (or 49c583ca48c4).
2359 # available since 3.5 (or 49c583ca48c4).
2360 def openrevlog(orig, repo, cmd, file_, opts):
2360 def openrevlog(orig, repo, cmd, file_, opts):
2361 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2361 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2362 raise error.Abort(b"This version doesn't support --dir option",
2362 raise error.Abort(b"This version doesn't support --dir option",
2363 hint=b"use 3.5 or later")
2363 hint=b"use 3.5 or later")
2364 return orig(repo, cmd, file_, opts)
2364 return orig(repo, cmd, file_, opts)
2365 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2365 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)