perf: add a `perfbranchmapupdate` command...
Boris Feld
r40804:f7230146 default
@@ -1,2497 +1,2552 @@
# perf.py - performance test routines
'''helper extension to measure performance'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial

from __future__ import absolute_import
import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass

def identity(a):
    return a

try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
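
# Illustrative sketch, not part of the original file: the helper above lets
# the rest of this extension probe for APIs using byte-string attribute
# names on both Python 2 and 3, for example (`repo` is a hypothetical
# repository object used only for illustration):
#
#   if safehasattr(repo, b'svfs'):
#       store_vfs = repo.svfs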

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
        (b'c', b'changelog', False, (b'open changelog')),
        (b'm', b'manifest', False, (b'open manifest')),
        (b'', b'dir', False, (b'open directory manifest')),
        ]))

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")

if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
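
# Illustrative sketch, not part of the original file: whichever of the three
# branches above defines `command`, the perf commands below register
# themselves the same way and end up as entries in `cmdtable`, e.g.
# (`perfexample` is a hypothetical command name):
#
#   @command(b'perfexample', formatteropts)
#   def perfexample(ui, repo, **opts):
#       ...
#
# which is roughly equivalent to:
#
#   cmdtable[b'perfexample'] = (perfexample, list(formatteropts))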

try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass

def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
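
# Illustrative sketch, not part of the original file: every perf command in
# this module follows the same pattern built on gettimer():
#
#   timer, fm = gettimer(ui, opts)       # timing loop + output formatter
#   timer(lambda: benchmarked_call())    # `benchmarked_call` is hypothetical
#   fm.end()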

def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()

@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))

def _timer(fm, func, setup=None, title=None, displayall=False):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)

def formatone(fm, timings, title=None, result=None, displayall=False):

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
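
# Illustrative sketch, not part of the original file: with the format strings
# above, a run typically prints a line shaped like
#
#   ! wall 0.000512 comb 0.000000 user 0.000000 sys 0.000000 (best of 100)
#
# plus `max.`/`avg.`/`median.` variants when perf.all-timing is enabled
# (the numbers shown here are made up).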

# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
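
# Illustrative sketch, not part of the original file: safeattrsetter() is used
# to swap an attribute for the duration of a measurement and put it back
# afterwards, for example the ui.fout redirection performed in gettimer():
#
#   uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
#   uifout.set(ui.ferr)      # redirect output to stderr
#   # ... run the measurement ...
#   uifout.restore()         # the explicit restore is this sketch's addition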

# utilities to examine each internal API changes

def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")

def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')

def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')

def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))

# utilities to clear cache

def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)

def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')

# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()

@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()

@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()

@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.headrevs())
        clearcaches(cl)
    timer(d)
    fm.end()

@command(b'perftags', formatteropts+
        [
            (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        ])
def perftags(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()
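
# Illustrative usage, not part of the original file, assuming this extension
# is enabled in an hgrc:
#
#   $ hg perftags
#   $ hg perftags --clear-revlogs    # also drop changelog/manifest caches
#
# (output follows the `! wall ... (best of N)` format described above)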

@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()

@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()

@command(b'perfbookmarks', formatteropts +
        [
            (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()

@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
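
# Illustrative usage, not part of the original file: the command above takes a
# bundle path, e.g. `hg perfbundleread bundle.hg`, where `bundle.hg` is a
# hypothetical file created beforehand with `hg bundle --all bundle.hg`; it
# then prints one timing block per strategy collected in `benches`.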
720
720
721 @command(b'perfchangegroupchangelog', formatteropts +
721 @command(b'perfchangegroupchangelog', formatteropts +
722 [(b'', b'cgversion', b'02', b'changegroup version'),
722 [(b'', b'cgversion', b'02', b'changegroup version'),
723 (b'r', b'rev', b'', b'revisions to add to changegroup')])
723 (b'r', b'rev', b'', b'revisions to add to changegroup')])
724 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
724 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
725 """Benchmark producing a changelog group for a changegroup.
725 """Benchmark producing a changelog group for a changegroup.
726
726
727 This measures the time spent processing the changelog during a
727 This measures the time spent processing the changelog during a
728 bundle operation. This occurs during `hg bundle` and on a server
728 bundle operation. This occurs during `hg bundle` and on a server
729 processing a `getbundle` wire protocol request (handles clones
729 processing a `getbundle` wire protocol request (handles clones
730 and pull requests).
730 and pull requests).
731
731
732 By default, all revisions are added to the changegroup.
732 By default, all revisions are added to the changegroup.
733 """
733 """
734 opts = _byteskwargs(opts)
734 opts = _byteskwargs(opts)
735 cl = repo.changelog
735 cl = repo.changelog
736 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
736 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
737 bundler = changegroup.getbundler(cgversion, repo)
737 bundler = changegroup.getbundler(cgversion, repo)
738
738
739 def d():
739 def d():
740 state, chunks = bundler._generatechangelog(cl, nodes)
740 state, chunks = bundler._generatechangelog(cl, nodes)
741 for chunk in chunks:
741 for chunk in chunks:
742 pass
742 pass
743
743
744 timer, fm = gettimer(ui, opts)
744 timer, fm = gettimer(ui, opts)
745
745
746 # Terminal printing can interfere with timing. So disable it.
746 # Terminal printing can interfere with timing. So disable it.
747 with ui.configoverride({(b'progress', b'disable'): True}):
747 with ui.configoverride({(b'progress', b'disable'): True}):
748 timer(d)
748 timer(d)
749
749
750 fm.end()
750 fm.end()
751
751
752 @command(b'perfdirs', formatteropts)
752 @command(b'perfdirs', formatteropts)
753 def perfdirs(ui, repo, **opts):
753 def perfdirs(ui, repo, **opts):
754 opts = _byteskwargs(opts)
754 opts = _byteskwargs(opts)
755 timer, fm = gettimer(ui, opts)
755 timer, fm = gettimer(ui, opts)
756 dirstate = repo.dirstate
756 dirstate = repo.dirstate
757 b'a' in dirstate
757 b'a' in dirstate
758 def d():
758 def d():
759 dirstate.hasdir(b'a')
759 dirstate.hasdir(b'a')
760 del dirstate._map._dirs
760 del dirstate._map._dirs
761 timer(d)
761 timer(d)
762 fm.end()
762 fm.end()
763
763
764 @command(b'perfdirstate', formatteropts)
764 @command(b'perfdirstate', formatteropts)
765 def perfdirstate(ui, repo, **opts):
765 def perfdirstate(ui, repo, **opts):
766 opts = _byteskwargs(opts)
766 opts = _byteskwargs(opts)
767 timer, fm = gettimer(ui, opts)
767 timer, fm = gettimer(ui, opts)
768 b"a" in repo.dirstate
768 b"a" in repo.dirstate
769 def d():
769 def d():
770 repo.dirstate.invalidate()
770 repo.dirstate.invalidate()
771 b"a" in repo.dirstate
771 b"a" in repo.dirstate
772 timer(d)
772 timer(d)
773 fm.end()
773 fm.end()
774
774
775 @command(b'perfdirstatedirs', formatteropts)
775 @command(b'perfdirstatedirs', formatteropts)
776 def perfdirstatedirs(ui, repo, **opts):
776 def perfdirstatedirs(ui, repo, **opts):
777 opts = _byteskwargs(opts)
777 opts = _byteskwargs(opts)
778 timer, fm = gettimer(ui, opts)
778 timer, fm = gettimer(ui, opts)
779 b"a" in repo.dirstate
779 b"a" in repo.dirstate
780 def d():
780 def d():
781 repo.dirstate.hasdir(b"a")
781 repo.dirstate.hasdir(b"a")
782 del repo.dirstate._map._dirs
782 del repo.dirstate._map._dirs
783 timer(d)
783 timer(d)
784 fm.end()
784 fm.end()
785
785
786 @command(b'perfdirstatefoldmap', formatteropts)
786 @command(b'perfdirstatefoldmap', formatteropts)
787 def perfdirstatefoldmap(ui, repo, **opts):
787 def perfdirstatefoldmap(ui, repo, **opts):
788 opts = _byteskwargs(opts)
788 opts = _byteskwargs(opts)
789 timer, fm = gettimer(ui, opts)
789 timer, fm = gettimer(ui, opts)
790 dirstate = repo.dirstate
790 dirstate = repo.dirstate
791 b'a' in dirstate
791 b'a' in dirstate
792 def d():
792 def d():
793 dirstate._map.filefoldmap.get(b'a')
793 dirstate._map.filefoldmap.get(b'a')
794 del dirstate._map.filefoldmap
794 del dirstate._map.filefoldmap
795 timer(d)
795 timer(d)
796 fm.end()
796 fm.end()
797
797
798 @command(b'perfdirfoldmap', formatteropts)
798 @command(b'perfdirfoldmap', formatteropts)
799 def perfdirfoldmap(ui, repo, **opts):
799 def perfdirfoldmap(ui, repo, **opts):
800 opts = _byteskwargs(opts)
800 opts = _byteskwargs(opts)
801 timer, fm = gettimer(ui, opts)
801 timer, fm = gettimer(ui, opts)
802 dirstate = repo.dirstate
802 dirstate = repo.dirstate
803 b'a' in dirstate
803 b'a' in dirstate
804 def d():
804 def d():
805 dirstate._map.dirfoldmap.get(b'a')
805 dirstate._map.dirfoldmap.get(b'a')
806 del dirstate._map.dirfoldmap
806 del dirstate._map.dirfoldmap
807 del dirstate._map._dirs
807 del dirstate._map._dirs
808 timer(d)
808 timer(d)
809 fm.end()
809 fm.end()
810
810
811 @command(b'perfdirstatewrite', formatteropts)
811 @command(b'perfdirstatewrite', formatteropts)
812 def perfdirstatewrite(ui, repo, **opts):
812 def perfdirstatewrite(ui, repo, **opts):
813 opts = _byteskwargs(opts)
813 opts = _byteskwargs(opts)
814 timer, fm = gettimer(ui, opts)
814 timer, fm = gettimer(ui, opts)
815 ds = repo.dirstate
815 ds = repo.dirstate
816 b"a" in ds
816 b"a" in ds
817 def d():
817 def d():
818 ds._dirty = True
818 ds._dirty = True
819 ds.write(repo.currenttransaction())
819 ds.write(repo.currenttransaction())
820 timer(d)
820 timer(d)
821 fm.end()
821 fm.end()
822
822
823 @command(b'perfmergecalculate',
823 @command(b'perfmergecalculate',
824 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
824 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
825 def perfmergecalculate(ui, repo, rev, **opts):
825 def perfmergecalculate(ui, repo, rev, **opts):
826 opts = _byteskwargs(opts)
826 opts = _byteskwargs(opts)
827 timer, fm = gettimer(ui, opts)
827 timer, fm = gettimer(ui, opts)
828 wctx = repo[None]
828 wctx = repo[None]
829 rctx = scmutil.revsingle(repo, rev, rev)
829 rctx = scmutil.revsingle(repo, rev, rev)
830 ancestor = wctx.ancestor(rctx)
830 ancestor = wctx.ancestor(rctx)
831 # we don't want working dir files to be stat'd in the benchmark, so prime
831 # we don't want working dir files to be stat'd in the benchmark, so prime
832 # that cache
832 # that cache
833 wctx.dirty()
833 wctx.dirty()
834 def d():
834 def d():
835 # acceptremote is True because we don't want prompts in the middle of
835 # acceptremote is True because we don't want prompts in the middle of
836 # our benchmark
836 # our benchmark
837 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
837 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
838 acceptremote=True, followcopies=True)
838 acceptremote=True, followcopies=True)
839 timer(d)
839 timer(d)
840 fm.end()
840 fm.end()
841
841
842 @command(b'perfpathcopies', [], b"REV REV")
842 @command(b'perfpathcopies', [], b"REV REV")
843 def perfpathcopies(ui, repo, rev1, rev2, **opts):
843 def perfpathcopies(ui, repo, rev1, rev2, **opts):
844 """benchmark the copy tracing logic"""
844 """benchmark the copy tracing logic"""
845 opts = _byteskwargs(opts)
845 opts = _byteskwargs(opts)
846 timer, fm = gettimer(ui, opts)
846 timer, fm = gettimer(ui, opts)
847 ctx1 = scmutil.revsingle(repo, rev1, rev1)
847 ctx1 = scmutil.revsingle(repo, rev1, rev1)
848 ctx2 = scmutil.revsingle(repo, rev2, rev2)
848 ctx2 = scmutil.revsingle(repo, rev2, rev2)
849 def d():
849 def d():
850 copies.pathcopies(ctx1, ctx2)
850 copies.pathcopies(ctx1, ctx2)
851 timer(d)
851 timer(d)
852 fm.end()
852 fm.end()
853
853
854 @command(b'perfphases',
854 @command(b'perfphases',
855 [(b'', b'full', False, b'include file reading time too'),
855 [(b'', b'full', False, b'include file reading time too'),
856 ], b"")
856 ], b"")
857 def perfphases(ui, repo, **opts):
857 def perfphases(ui, repo, **opts):
858 """benchmark phasesets computation"""
858 """benchmark phasesets computation"""
859 opts = _byteskwargs(opts)
859 opts = _byteskwargs(opts)
860 timer, fm = gettimer(ui, opts)
860 timer, fm = gettimer(ui, opts)
861 _phases = repo._phasecache
861 _phases = repo._phasecache
862 full = opts.get(b'full')
862 full = opts.get(b'full')
863 def d():
863 def d():
864 phases = _phases
864 phases = _phases
865 if full:
865 if full:
866 clearfilecache(repo, b'_phasecache')
866 clearfilecache(repo, b'_phasecache')
867 phases = repo._phasecache
867 phases = repo._phasecache
868 phases.invalidate()
868 phases.invalidate()
869 phases.loadphaserevs(repo)
869 phases.loadphaserevs(repo)
870 timer(d)
870 timer(d)
871 fm.end()
871 fm.end()
872
872
873 @command(b'perfphasesremote',
873 @command(b'perfphasesremote',
874 [], b"[DEST]")
874 [], b"[DEST]")
875 def perfphasesremote(ui, repo, dest=None, **opts):
875 def perfphasesremote(ui, repo, dest=None, **opts):
876 """benchmark time needed to analyse phases of the remote server"""
876 """benchmark time needed to analyse phases of the remote server"""
877 from mercurial.node import (
877 from mercurial.node import (
878 bin,
878 bin,
879 )
879 )
880 from mercurial import (
880 from mercurial import (
881 exchange,
881 exchange,
882 hg,
882 hg,
883 phases,
883 phases,
884 )
884 )
885 opts = _byteskwargs(opts)
885 opts = _byteskwargs(opts)
886 timer, fm = gettimer(ui, opts)
886 timer, fm = gettimer(ui, opts)
887
887
888 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
888 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
889 if not path:
889 if not path:
890 raise error.Abort((b'default repository not configured!'),
890 raise error.Abort((b'default repository not configured!'),
891 hint=(b"see 'hg help config.paths'"))
891 hint=(b"see 'hg help config.paths'"))
892 dest = path.pushloc or path.loc
892 dest = path.pushloc or path.loc
893 branches = (path.branch, opts.get(b'branch') or [])
893 branches = (path.branch, opts.get(b'branch') or [])
894 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
894 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
895 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
895 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
896 other = hg.peer(repo, opts, dest)
896 other = hg.peer(repo, opts, dest)
897
897
898 # easier to perform discovery through the operation
898 # easier to perform discovery through the operation
899 op = exchange.pushoperation(repo, other)
899 op = exchange.pushoperation(repo, other)
900 exchange._pushdiscoverychangeset(op)
900 exchange._pushdiscoverychangeset(op)
901
901
902 remotesubset = op.fallbackheads
902 remotesubset = op.fallbackheads
903
903
904 with other.commandexecutor() as e:
904 with other.commandexecutor() as e:
905 remotephases = e.callcommand(b'listkeys',
905 remotephases = e.callcommand(b'listkeys',
906 {b'namespace': b'phases'}).result()
906 {b'namespace': b'phases'}).result()
907 del other
907 del other
908 publishing = remotephases.get(b'publishing', False)
908 publishing = remotephases.get(b'publishing', False)
909 if publishing:
909 if publishing:
910 ui.status((b'publishing: yes\n'))
910 ui.status((b'publishing: yes\n'))
911 else:
911 else:
912 ui.status((b'publishing: no\n'))
912 ui.status((b'publishing: no\n'))
913
913
914 nodemap = repo.changelog.nodemap
914 nodemap = repo.changelog.nodemap
915 nonpublishroots = 0
915 nonpublishroots = 0
916 for nhex, phase in remotephases.iteritems():
916 for nhex, phase in remotephases.iteritems():
917 if nhex == b'publishing': # ignore data related to publish option
917 if nhex == b'publishing': # ignore data related to publish option
918 continue
918 continue
919 node = bin(nhex)
919 node = bin(nhex)
920 if node in nodemap and int(phase):
920 if node in nodemap and int(phase):
921 nonpublishroots += 1
921 nonpublishroots += 1
922 ui.status((b'number of roots: %d\n') % len(remotephases))
922 ui.status((b'number of roots: %d\n') % len(remotephases))
923 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
923 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
924 def d():
924 def d():
925 phases.remotephasessummary(repo,
925 phases.remotephasessummary(repo,
926 remotesubset,
926 remotesubset,
927 remotephases)
927 remotephases)
928 timer(d)
928 timer(d)
929 fm.end()
929 fm.end()
930
930
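# Editorial sketch (not part of this changeset): the command above fetches the
# remote 'phases' listkeys namespace, a {hex-node: phase} mapping plus a special
# 'publishing' entry, and counts advertised non-public roots known locally.
# The same counting step, standalone, with invented sample data:

def _count_nonpublic_roots(remotephases, knownnodes):
    roots = 0
    for nhex, phase in remotephases.items():
        if nhex == 'publishing':      # metadata entry, not a root
            continue
        if nhex in knownnodes and int(phase):
            roots += 1
    return roots

_sample = {'publishing': 'False', 'aa' * 20: '1', 'bb' * 20: '1'}
assert _count_nonpublic_roots(_sample, {'aa' * 20}) == 1
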
931 @command(b'perfmanifest',[
931 @command(b'perfmanifest',[
932 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
932 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
933 (b'', b'clear-disk', False, b'clear on-disk caches too'),
933 (b'', b'clear-disk', False, b'clear on-disk caches too'),
934 ] + formatteropts, b'REV|NODE')
934 ] + formatteropts, b'REV|NODE')
935 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
935 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
936 """benchmark the time to read a manifest from disk and return a usable
936 """benchmark the time to read a manifest from disk and return a usable
937 dict-like object
937 dict-like object
938
938
939 Manifest caches are cleared before retrieval."""
939 Manifest caches are cleared before retrieval."""
940 opts = _byteskwargs(opts)
940 opts = _byteskwargs(opts)
941 timer, fm = gettimer(ui, opts)
941 timer, fm = gettimer(ui, opts)
942 if not manifest_rev:
942 if not manifest_rev:
943 ctx = scmutil.revsingle(repo, rev, rev)
943 ctx = scmutil.revsingle(repo, rev, rev)
944 t = ctx.manifestnode()
944 t = ctx.manifestnode()
945 else:
945 else:
946 from mercurial.node import bin
946 from mercurial.node import bin
947
947
948 if len(rev) == 40:
948 if len(rev) == 40:
949 t = bin(rev)
949 t = bin(rev)
950 else:
950 else:
951 try:
951 try:
952 rev = int(rev)
952 rev = int(rev)
953
953
954 if util.safehasattr(repo.manifestlog, b'getstorage'):
954 if util.safehasattr(repo.manifestlog, b'getstorage'):
955 t = repo.manifestlog.getstorage(b'').node(rev)
955 t = repo.manifestlog.getstorage(b'').node(rev)
956 else:
956 else:
957 t = repo.manifestlog._revlog.lookup(rev)
957 t = repo.manifestlog._revlog.lookup(rev)
958 except ValueError:
958 except ValueError:
959 raise error.Abort(b'manifest revision must be integer or full '
959 raise error.Abort(b'manifest revision must be integer or full '
960 b'node')
960 b'node')
961 def d():
961 def d():
962 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
962 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
963 repo.manifestlog[t].read()
963 repo.manifestlog[t].read()
964 timer(d)
964 timer(d)
965 fm.end()
965 fm.end()
966
966
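# Editorial sketch (not part of this changeset): a single uninstrumented run of
# the body timed by perfmanifest above, assuming `repo` is an existing local
# repository object and `t` a manifest node already resolved as done above.

def _time_one_manifest_read(repo, t, clear_disk=False):
    repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
    begin = util.timer()
    repo.manifestlog[t].read()        # parse the manifest into a dict-like object
    return util.timer() - begin
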
967 @command(b'perfchangeset', formatteropts)
967 @command(b'perfchangeset', formatteropts)
968 def perfchangeset(ui, repo, rev, **opts):
968 def perfchangeset(ui, repo, rev, **opts):
969 opts = _byteskwargs(opts)
969 opts = _byteskwargs(opts)
970 timer, fm = gettimer(ui, opts)
970 timer, fm = gettimer(ui, opts)
971 n = scmutil.revsingle(repo, rev).node()
971 n = scmutil.revsingle(repo, rev).node()
972 def d():
972 def d():
973 repo.changelog.read(n)
973 repo.changelog.read(n)
974 #repo.changelog._cache = None
974 #repo.changelog._cache = None
975 timer(d)
975 timer(d)
976 fm.end()
976 fm.end()
977
977
978 @command(b'perfignore', formatteropts)
978 @command(b'perfignore', formatteropts)
979 def perfignore(ui, repo, **opts):
979 def perfignore(ui, repo, **opts):
980     """benchmark operations related to computing ignore rules"""
980     """benchmark operations related to computing ignore rules"""
981 opts = _byteskwargs(opts)
981 opts = _byteskwargs(opts)
982 timer, fm = gettimer(ui, opts)
982 timer, fm = gettimer(ui, opts)
983 dirstate = repo.dirstate
983 dirstate = repo.dirstate
984
984
985 def setupone():
985 def setupone():
986 dirstate.invalidate()
986 dirstate.invalidate()
987 clearfilecache(dirstate, b'_ignore')
987 clearfilecache(dirstate, b'_ignore')
988
988
989 def runone():
989 def runone():
990 dirstate._ignore
990 dirstate._ignore
991
991
992 timer(runone, setup=setupone, title=b"load")
992 timer(runone, setup=setupone, title=b"load")
993 fm.end()
993 fm.end()
994
994
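# Editorial sketch (not part of this changeset): perfignore relies on the
# setup/run split offered by gettimer() so that cache invalidation is not
# charged to the measured body. The same pattern with only the standard
# library, for illustration:

def _bench(run, setup=None, rounds=3):
    best = None
    for _ in range(rounds):
        if setup is not None:
            setup()                   # not measured
        begin = time.time()
        run()                         # measured
        elapsed = time.time() - begin
        best = elapsed if best is None else min(best, elapsed)
    return best
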
995 @command(b'perfindex', formatteropts)
995 @command(b'perfindex', formatteropts)
996 def perfindex(ui, repo, **opts):
996 def perfindex(ui, repo, **opts):
997 import mercurial.revlog
997 import mercurial.revlog
998 opts = _byteskwargs(opts)
998 opts = _byteskwargs(opts)
999 timer, fm = gettimer(ui, opts)
999 timer, fm = gettimer(ui, opts)
1000 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1000 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1001 n = repo[b"tip"].node()
1001 n = repo[b"tip"].node()
1002 svfs = getsvfs(repo)
1002 svfs = getsvfs(repo)
1003 def d():
1003 def d():
1004 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
1004 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
1005 cl.rev(n)
1005 cl.rev(n)
1006 timer(d)
1006 timer(d)
1007 fm.end()
1007 fm.end()
1008
1008
1009 @command(b'perfstartup', formatteropts)
1009 @command(b'perfstartup', formatteropts)
1010 def perfstartup(ui, repo, **opts):
1010 def perfstartup(ui, repo, **opts):
1011 opts = _byteskwargs(opts)
1011 opts = _byteskwargs(opts)
1012 timer, fm = gettimer(ui, opts)
1012 timer, fm = gettimer(ui, opts)
1013 def d():
1013 def d():
1014 if os.name != r'nt':
1014 if os.name != r'nt':
1015 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1015 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1016 fsencode(sys.argv[0]))
1016 fsencode(sys.argv[0]))
1017 else:
1017 else:
1018 os.environ[r'HGRCPATH'] = r' '
1018 os.environ[r'HGRCPATH'] = r' '
1019 os.system(r"%s version -q > NUL" % sys.argv[0])
1019 os.system(r"%s version -q > NUL" % sys.argv[0])
1020 timer(d)
1020 timer(d)
1021 fm.end()
1021 fm.end()
1022
1022
1023 @command(b'perfparents', formatteropts)
1023 @command(b'perfparents', formatteropts)
1024 def perfparents(ui, repo, **opts):
1024 def perfparents(ui, repo, **opts):
1025 opts = _byteskwargs(opts)
1025 opts = _byteskwargs(opts)
1026 timer, fm = gettimer(ui, opts)
1026 timer, fm = gettimer(ui, opts)
1027 # control the number of commits perfparents iterates over
1027 # control the number of commits perfparents iterates over
1028 # experimental config: perf.parentscount
1028 # experimental config: perf.parentscount
1029 count = getint(ui, b"perf", b"parentscount", 1000)
1029 count = getint(ui, b"perf", b"parentscount", 1000)
1030 if len(repo.changelog) < count:
1030 if len(repo.changelog) < count:
1031 raise error.Abort(b"repo needs %d commits for this test" % count)
1031 raise error.Abort(b"repo needs %d commits for this test" % count)
1032 repo = repo.unfiltered()
1032 repo = repo.unfiltered()
1033 nl = [repo.changelog.node(i) for i in _xrange(count)]
1033 nl = [repo.changelog.node(i) for i in _xrange(count)]
1034 def d():
1034 def d():
1035 for n in nl:
1035 for n in nl:
1036 repo.changelog.parents(n)
1036 repo.changelog.parents(n)
1037 timer(d)
1037 timer(d)
1038 fm.end()
1038 fm.end()
1039
1039
1040 @command(b'perfctxfiles', formatteropts)
1040 @command(b'perfctxfiles', formatteropts)
1041 def perfctxfiles(ui, repo, x, **opts):
1041 def perfctxfiles(ui, repo, x, **opts):
1042 opts = _byteskwargs(opts)
1042 opts = _byteskwargs(opts)
1043 x = int(x)
1043 x = int(x)
1044 timer, fm = gettimer(ui, opts)
1044 timer, fm = gettimer(ui, opts)
1045 def d():
1045 def d():
1046 len(repo[x].files())
1046 len(repo[x].files())
1047 timer(d)
1047 timer(d)
1048 fm.end()
1048 fm.end()
1049
1049
1050 @command(b'perfrawfiles', formatteropts)
1050 @command(b'perfrawfiles', formatteropts)
1051 def perfrawfiles(ui, repo, x, **opts):
1051 def perfrawfiles(ui, repo, x, **opts):
1052 opts = _byteskwargs(opts)
1052 opts = _byteskwargs(opts)
1053 x = int(x)
1053 x = int(x)
1054 timer, fm = gettimer(ui, opts)
1054 timer, fm = gettimer(ui, opts)
1055 cl = repo.changelog
1055 cl = repo.changelog
1056 def d():
1056 def d():
1057 len(cl.read(x)[3])
1057 len(cl.read(x)[3])
1058 timer(d)
1058 timer(d)
1059 fm.end()
1059 fm.end()
1060
1060
1061 @command(b'perflookup', formatteropts)
1061 @command(b'perflookup', formatteropts)
1062 def perflookup(ui, repo, rev, **opts):
1062 def perflookup(ui, repo, rev, **opts):
1063 opts = _byteskwargs(opts)
1063 opts = _byteskwargs(opts)
1064 timer, fm = gettimer(ui, opts)
1064 timer, fm = gettimer(ui, opts)
1065 timer(lambda: len(repo.lookup(rev)))
1065 timer(lambda: len(repo.lookup(rev)))
1066 fm.end()
1066 fm.end()
1067
1067
1068 @command(b'perflinelogedits',
1068 @command(b'perflinelogedits',
1069 [(b'n', b'edits', 10000, b'number of edits'),
1069 [(b'n', b'edits', 10000, b'number of edits'),
1070 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1070 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1071 ], norepo=True)
1071 ], norepo=True)
1072 def perflinelogedits(ui, **opts):
1072 def perflinelogedits(ui, **opts):
1073 from mercurial import linelog
1073 from mercurial import linelog
1074
1074
1075 opts = _byteskwargs(opts)
1075 opts = _byteskwargs(opts)
1076
1076
1077 edits = opts[b'edits']
1077 edits = opts[b'edits']
1078 maxhunklines = opts[b'max_hunk_lines']
1078 maxhunklines = opts[b'max_hunk_lines']
1079
1079
1080 maxb1 = 100000
1080 maxb1 = 100000
1081 random.seed(0)
1081 random.seed(0)
1082 randint = random.randint
1082 randint = random.randint
1083 currentlines = 0
1083 currentlines = 0
1084 arglist = []
1084 arglist = []
1085 for rev in _xrange(edits):
1085 for rev in _xrange(edits):
1086 a1 = randint(0, currentlines)
1086 a1 = randint(0, currentlines)
1087 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1087 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1088 b1 = randint(0, maxb1)
1088 b1 = randint(0, maxb1)
1089 b2 = randint(b1, b1 + maxhunklines)
1089 b2 = randint(b1, b1 + maxhunklines)
1090 currentlines += (b2 - b1) - (a2 - a1)
1090 currentlines += (b2 - b1) - (a2 - a1)
1091 arglist.append((rev, a1, a2, b1, b2))
1091 arglist.append((rev, a1, a2, b1, b2))
1092
1092
1093 def d():
1093 def d():
1094 ll = linelog.linelog()
1094 ll = linelog.linelog()
1095 for args in arglist:
1095 for args in arglist:
1096 ll.replacelines(*args)
1096 ll.replacelines(*args)
1097
1097
1098 timer, fm = gettimer(ui, opts)
1098 timer, fm = gettimer(ui, opts)
1099 timer(d)
1099 timer(d)
1100 fm.end()
1100 fm.end()
1101
1101
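# Editorial sketch (not part of this changeset): each generated tuple
# (rev, a1, a2, b1, b2) asks the linelog to replace current lines [a1, a2)
# with new lines [b1, b2), so the file size changes by (b2 - b1) - (a2 - a1),
# which is exactly how `currentlines` is maintained above. One such edit:

random.seed(0)
_currentlines = 0
_a1 = random.randint(0, _currentlines)                     # 0 on an empty file
_a2 = random.randint(_a1, min(_currentlines, _a1 + 10))
_b1 = random.randint(0, 100000)
_b2 = random.randint(_b1, _b1 + 10)
_currentlines += (_b2 - _b1) - (_a2 - _a1)                 # size after the edit
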
1102 @command(b'perfrevrange', formatteropts)
1102 @command(b'perfrevrange', formatteropts)
1103 def perfrevrange(ui, repo, *specs, **opts):
1103 def perfrevrange(ui, repo, *specs, **opts):
1104 opts = _byteskwargs(opts)
1104 opts = _byteskwargs(opts)
1105 timer, fm = gettimer(ui, opts)
1105 timer, fm = gettimer(ui, opts)
1106 revrange = scmutil.revrange
1106 revrange = scmutil.revrange
1107 timer(lambda: len(revrange(repo, specs)))
1107 timer(lambda: len(revrange(repo, specs)))
1108 fm.end()
1108 fm.end()
1109
1109
1110 @command(b'perfnodelookup', formatteropts)
1110 @command(b'perfnodelookup', formatteropts)
1111 def perfnodelookup(ui, repo, rev, **opts):
1111 def perfnodelookup(ui, repo, rev, **opts):
1112 opts = _byteskwargs(opts)
1112 opts = _byteskwargs(opts)
1113 timer, fm = gettimer(ui, opts)
1113 timer, fm = gettimer(ui, opts)
1114 import mercurial.revlog
1114 import mercurial.revlog
1115 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1115 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1116 n = scmutil.revsingle(repo, rev).node()
1116 n = scmutil.revsingle(repo, rev).node()
1117 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1117 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1118 def d():
1118 def d():
1119 cl.rev(n)
1119 cl.rev(n)
1120 clearcaches(cl)
1120 clearcaches(cl)
1121 timer(d)
1121 timer(d)
1122 fm.end()
1122 fm.end()
1123
1123
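# Editorial sketch (not part of this changeset): node -> rev is the reverse
# lookup over the index, and clearing caches above makes every cl.rev(n) call
# pay the full resolution cost. A toy model of what the nodemap provides:

_index = [b'n0' * 10, b'n1' * 10, b'n2' * 10]     # rev -> 20-byte node

def _rev_without_nodemap(node):
    return _index.index(node)                     # linear scan

_nodemap = {node: rev for rev, node in enumerate(_index)}

def _rev_with_nodemap(node):
    return _nodemap[node]                         # constant-time lookup

assert _rev_without_nodemap(b'n1' * 10) == _rev_with_nodemap(b'n1' * 10) == 1
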
1124 @command(b'perflog',
1124 @command(b'perflog',
1125 [(b'', b'rename', False, b'ask log to follow renames')
1125 [(b'', b'rename', False, b'ask log to follow renames')
1126 ] + formatteropts)
1126 ] + formatteropts)
1127 def perflog(ui, repo, rev=None, **opts):
1127 def perflog(ui, repo, rev=None, **opts):
1128 opts = _byteskwargs(opts)
1128 opts = _byteskwargs(opts)
1129 if rev is None:
1129 if rev is None:
1130 rev=[]
1130 rev=[]
1131 timer, fm = gettimer(ui, opts)
1131 timer, fm = gettimer(ui, opts)
1132 ui.pushbuffer()
1132 ui.pushbuffer()
1133 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1133 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1134 copies=opts.get(b'rename')))
1134 copies=opts.get(b'rename')))
1135 ui.popbuffer()
1135 ui.popbuffer()
1136 fm.end()
1136 fm.end()
1137
1137
1138 @command(b'perfmoonwalk', formatteropts)
1138 @command(b'perfmoonwalk', formatteropts)
1139 def perfmoonwalk(ui, repo, **opts):
1139 def perfmoonwalk(ui, repo, **opts):
1140 """benchmark walking the changelog backwards
1140 """benchmark walking the changelog backwards
1141
1141
1142 This also loads the changelog data for each revision in the changelog.
1142 This also loads the changelog data for each revision in the changelog.
1143 """
1143 """
1144 opts = _byteskwargs(opts)
1144 opts = _byteskwargs(opts)
1145 timer, fm = gettimer(ui, opts)
1145 timer, fm = gettimer(ui, opts)
1146 def moonwalk():
1146 def moonwalk():
1147 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1147 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1148 ctx = repo[i]
1148 ctx = repo[i]
1149 ctx.branch() # read changelog data (in addition to the index)
1149 ctx.branch() # read changelog data (in addition to the index)
1150 timer(moonwalk)
1150 timer(moonwalk)
1151 fm.end()
1151 fm.end()
1152
1152
1153 @command(b'perftemplating',
1153 @command(b'perftemplating',
1154 [(b'r', b'rev', [], b'revisions to run the template on'),
1154 [(b'r', b'rev', [], b'revisions to run the template on'),
1155 ] + formatteropts)
1155 ] + formatteropts)
1156 def perftemplating(ui, repo, testedtemplate=None, **opts):
1156 def perftemplating(ui, repo, testedtemplate=None, **opts):
1157 """test the rendering time of a given template"""
1157 """test the rendering time of a given template"""
1158 if makelogtemplater is None:
1158 if makelogtemplater is None:
1159 raise error.Abort((b"perftemplating not available with this Mercurial"),
1159 raise error.Abort((b"perftemplating not available with this Mercurial"),
1160 hint=b"use 4.3 or later")
1160 hint=b"use 4.3 or later")
1161
1161
1162 opts = _byteskwargs(opts)
1162 opts = _byteskwargs(opts)
1163
1163
1164 nullui = ui.copy()
1164 nullui = ui.copy()
1165 nullui.fout = open(os.devnull, r'wb')
1165 nullui.fout = open(os.devnull, r'wb')
1166 nullui.disablepager()
1166 nullui.disablepager()
1167 revs = opts.get(b'rev')
1167 revs = opts.get(b'rev')
1168 if not revs:
1168 if not revs:
1169 revs = [b'all()']
1169 revs = [b'all()']
1170 revs = list(scmutil.revrange(repo, revs))
1170 revs = list(scmutil.revrange(repo, revs))
1171
1171
1172 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1172 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1173 b' {author|person}: {desc|firstline}\n')
1173 b' {author|person}: {desc|firstline}\n')
1174 if testedtemplate is None:
1174 if testedtemplate is None:
1175 testedtemplate = defaulttemplate
1175 testedtemplate = defaulttemplate
1176 displayer = makelogtemplater(nullui, repo, testedtemplate)
1176 displayer = makelogtemplater(nullui, repo, testedtemplate)
1177 def format():
1177 def format():
1178 for r in revs:
1178 for r in revs:
1179 ctx = repo[r]
1179 ctx = repo[r]
1180 displayer.show(ctx)
1180 displayer.show(ctx)
1181 displayer.flush(ctx)
1181 displayer.flush(ctx)
1182
1182
1183 timer, fm = gettimer(ui, opts)
1183 timer, fm = gettimer(ui, opts)
1184 timer(format)
1184 timer(format)
1185 fm.end()
1185 fm.end()
1186
1186
1187 @command(b'perfhelper-pathcopies', formatteropts +
1187 @command(b'perfhelper-pathcopies', formatteropts +
1188 [
1188 [
1189 (b'r', b'revs', [], b'restrict search to these revisions'),
1189 (b'r', b'revs', [], b'restrict search to these revisions'),
1190 (b'', b'timing', False, b'provides extra data (costly)'),
1190 (b'', b'timing', False, b'provides extra data (costly)'),
1191 ])
1191 ])
1192 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1192 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1193     """find statistics about potential parameters for the `perftracecopies` command
1193     """find statistics about potential parameters for the `perftracecopies` command
1194
1194
1195     This command finds source-destination pairs relevant for copy-tracing testing.
1195     This command finds source-destination pairs relevant for copy-tracing testing.
1196     It reports values for some of the parameters that impact copy tracing time.
1196     It reports values for some of the parameters that impact copy tracing time.
1197
1197
1198 If `--timing` is set, rename detection is run and the associated timing
1198 If `--timing` is set, rename detection is run and the associated timing
1199     will be reported. The extra details come at the cost of a slower command
1199     will be reported. The extra details come at the cost of a slower command
1200 execution.
1200 execution.
1201
1201
1202 Since the rename detection is only run once, other factors might easily
1202 Since the rename detection is only run once, other factors might easily
1203 affect the precision of the timing. However it should give a good
1203 affect the precision of the timing. However it should give a good
1204 approximation of which revision pairs are very costly.
1204 approximation of which revision pairs are very costly.
1205 """
1205 """
1206 opts = _byteskwargs(opts)
1206 opts = _byteskwargs(opts)
1207 fm = ui.formatter(b'perf', opts)
1207 fm = ui.formatter(b'perf', opts)
1208 dotiming = opts[b'timing']
1208 dotiming = opts[b'timing']
1209
1209
1210 if dotiming:
1210 if dotiming:
1211 header = '%12s %12s %12s %12s %12s %12s\n'
1211 header = '%12s %12s %12s %12s %12s %12s\n'
1212 output = ("%(source)12s %(destination)12s "
1212 output = ("%(source)12s %(destination)12s "
1213 "%(nbrevs)12d %(nbmissingfiles)12d "
1213 "%(nbrevs)12d %(nbmissingfiles)12d "
1214 "%(nbrenamedfiles)12d %(time)18.5f\n")
1214 "%(nbrenamedfiles)12d %(time)18.5f\n")
1215 header_names = ("source", "destination", "nb-revs", "nb-files",
1215 header_names = ("source", "destination", "nb-revs", "nb-files",
1216 "nb-renames", "time")
1216 "nb-renames", "time")
1217 fm.plain(header % header_names)
1217 fm.plain(header % header_names)
1218 else:
1218 else:
1219 header = '%12s %12s %12s %12s\n'
1219 header = '%12s %12s %12s %12s\n'
1220 output = ("%(source)12s %(destination)12s "
1220 output = ("%(source)12s %(destination)12s "
1221 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1221 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1222 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1222 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1223
1223
1224 if not revs:
1224 if not revs:
1225 revs = ['all()']
1225 revs = ['all()']
1226 revs = scmutil.revrange(repo, revs)
1226 revs = scmutil.revrange(repo, revs)
1227
1227
1228 roi = repo.revs('merge() and %ld', revs)
1228 roi = repo.revs('merge() and %ld', revs)
1229 for r in roi:
1229 for r in roi:
1230 ctx = repo[r]
1230 ctx = repo[r]
1231 p1 = ctx.p1().rev()
1231 p1 = ctx.p1().rev()
1232 p2 = ctx.p2().rev()
1232 p2 = ctx.p2().rev()
1233 bases = repo.changelog._commonancestorsheads(p1, p2)
1233 bases = repo.changelog._commonancestorsheads(p1, p2)
1234 for p in (p1, p2):
1234 for p in (p1, p2):
1235 for b in bases:
1235 for b in bases:
1236 base = repo[b]
1236 base = repo[b]
1237 parent = repo[p]
1237 parent = repo[p]
1238 missing = copies._computeforwardmissing(base, parent)
1238 missing = copies._computeforwardmissing(base, parent)
1239 if not missing:
1239 if not missing:
1240 continue
1240 continue
1241 data = {
1241 data = {
1242 b'source': base.hex(),
1242 b'source': base.hex(),
1243 b'destination': parent.hex(),
1243 b'destination': parent.hex(),
1244 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1244 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1245 b'nbmissingfiles': len(missing),
1245 b'nbmissingfiles': len(missing),
1246 }
1246 }
1247 if dotiming:
1247 if dotiming:
1248 begin = util.timer()
1248 begin = util.timer()
1249 renames = copies.pathcopies(base, parent)
1249 renames = copies.pathcopies(base, parent)
1250 end = util.timer()
1250 end = util.timer()
1251 # not very stable timing since we did only one run
1251 # not very stable timing since we did only one run
1252 data['time'] = end - begin
1252 data['time'] = end - begin
1253 data['nbrenamedfiles'] = len(renames)
1253 data['nbrenamedfiles'] = len(renames)
1254 fm.startitem()
1254 fm.startitem()
1255 fm.data(**data)
1255 fm.data(**data)
1256 out = data.copy()
1256 out = data.copy()
1257 out['source'] = fm.hexfunc(base.node())
1257 out['source'] = fm.hexfunc(base.node())
1258 out['destination'] = fm.hexfunc(parent.node())
1258 out['destination'] = fm.hexfunc(parent.node())
1259 fm.plain(output % out)
1259 fm.plain(output % out)
1260
1260
1261 fm.end()
1261 fm.end()
1262
1262
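# Editorial sketch (not part of this changeset): the rows emitted above are
# plain %-formatting over the per-pair data dict; --timing adds the rename
# count and elapsed-time columns. One row rendered with invented values:

_output = ("%(source)12s %(destination)12s "
           "%(nbrevs)12d %(nbmissingfiles)12d "
           "%(nbrenamedfiles)12d %(time)18.5f\n")
_row = {
    'source': 'a5b1c2d3e4f5',
    'destination': '0f9e8d7c6b5a',
    'nbrevs': 42,
    'nbmissingfiles': 7,
    'nbrenamedfiles': 3,
    'time': 0.01234,
}
print(_output % _row, end='')
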
1263 @command(b'perfcca', formatteropts)
1263 @command(b'perfcca', formatteropts)
1264 def perfcca(ui, repo, **opts):
1264 def perfcca(ui, repo, **opts):
1265 opts = _byteskwargs(opts)
1265 opts = _byteskwargs(opts)
1266 timer, fm = gettimer(ui, opts)
1266 timer, fm = gettimer(ui, opts)
1267 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1267 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1268 fm.end()
1268 fm.end()
1269
1269
1270 @command(b'perffncacheload', formatteropts)
1270 @command(b'perffncacheload', formatteropts)
1271 def perffncacheload(ui, repo, **opts):
1271 def perffncacheload(ui, repo, **opts):
1272 opts = _byteskwargs(opts)
1272 opts = _byteskwargs(opts)
1273 timer, fm = gettimer(ui, opts)
1273 timer, fm = gettimer(ui, opts)
1274 s = repo.store
1274 s = repo.store
1275 def d():
1275 def d():
1276 s.fncache._load()
1276 s.fncache._load()
1277 timer(d)
1277 timer(d)
1278 fm.end()
1278 fm.end()
1279
1279
1280 @command(b'perffncachewrite', formatteropts)
1280 @command(b'perffncachewrite', formatteropts)
1281 def perffncachewrite(ui, repo, **opts):
1281 def perffncachewrite(ui, repo, **opts):
1282 opts = _byteskwargs(opts)
1282 opts = _byteskwargs(opts)
1283 timer, fm = gettimer(ui, opts)
1283 timer, fm = gettimer(ui, opts)
1284 s = repo.store
1284 s = repo.store
1285 lock = repo.lock()
1285 lock = repo.lock()
1286 s.fncache._load()
1286 s.fncache._load()
1287 tr = repo.transaction(b'perffncachewrite')
1287 tr = repo.transaction(b'perffncachewrite')
1288 tr.addbackup(b'fncache')
1288 tr.addbackup(b'fncache')
1289 def d():
1289 def d():
1290 s.fncache._dirty = True
1290 s.fncache._dirty = True
1291 s.fncache.write(tr)
1291 s.fncache.write(tr)
1292 timer(d)
1292 timer(d)
1293 tr.close()
1293 tr.close()
1294 lock.release()
1294 lock.release()
1295 fm.end()
1295 fm.end()
1296
1296
1297 @command(b'perffncacheencode', formatteropts)
1297 @command(b'perffncacheencode', formatteropts)
1298 def perffncacheencode(ui, repo, **opts):
1298 def perffncacheencode(ui, repo, **opts):
1299 opts = _byteskwargs(opts)
1299 opts = _byteskwargs(opts)
1300 timer, fm = gettimer(ui, opts)
1300 timer, fm = gettimer(ui, opts)
1301 s = repo.store
1301 s = repo.store
1302 s.fncache._load()
1302 s.fncache._load()
1303 def d():
1303 def d():
1304 for p in s.fncache.entries:
1304 for p in s.fncache.entries:
1305 s.encode(p)
1305 s.encode(p)
1306 timer(d)
1306 timer(d)
1307 fm.end()
1307 fm.end()
1308
1308
1309 def _bdiffworker(q, blocks, xdiff, ready, done):
1309 def _bdiffworker(q, blocks, xdiff, ready, done):
1310 while not done.is_set():
1310 while not done.is_set():
1311 pair = q.get()
1311 pair = q.get()
1312 while pair is not None:
1312 while pair is not None:
1313 if xdiff:
1313 if xdiff:
1314 mdiff.bdiff.xdiffblocks(*pair)
1314 mdiff.bdiff.xdiffblocks(*pair)
1315 elif blocks:
1315 elif blocks:
1316 mdiff.bdiff.blocks(*pair)
1316 mdiff.bdiff.blocks(*pair)
1317 else:
1317 else:
1318 mdiff.textdiff(*pair)
1318 mdiff.textdiff(*pair)
1319 q.task_done()
1319 q.task_done()
1320 pair = q.get()
1320 pair = q.get()
1321 q.task_done() # for the None one
1321 q.task_done() # for the None one
1322 with ready:
1322 with ready:
1323 ready.wait()
1323 ready.wait()
1324
1324
1325 def _manifestrevision(repo, mnode):
1325 def _manifestrevision(repo, mnode):
1326 ml = repo.manifestlog
1326 ml = repo.manifestlog
1327
1327
1328 if util.safehasattr(ml, b'getstorage'):
1328 if util.safehasattr(ml, b'getstorage'):
1329 store = ml.getstorage(b'')
1329 store = ml.getstorage(b'')
1330 else:
1330 else:
1331 store = ml._revlog
1331 store = ml._revlog
1332
1332
1333 return store.revision(mnode)
1333 return store.revision(mnode)
1334
1334
1335 @command(b'perfbdiff', revlogopts + formatteropts + [
1335 @command(b'perfbdiff', revlogopts + formatteropts + [
1336 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1336 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1337 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1337 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1338 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1338 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1339 (b'', b'blocks', False, b'test computing diffs into blocks'),
1339 (b'', b'blocks', False, b'test computing diffs into blocks'),
1340 (b'', b'xdiff', False, b'use xdiff algorithm'),
1340 (b'', b'xdiff', False, b'use xdiff algorithm'),
1341 ],
1341 ],
1342
1342
1343 b'-c|-m|FILE REV')
1343 b'-c|-m|FILE REV')
1344 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1344 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1345 """benchmark a bdiff between revisions
1345 """benchmark a bdiff between revisions
1346
1346
1347     By default, benchmark a bdiff between the given revision and its delta parent.
1347     By default, benchmark a bdiff between the given revision and its delta parent.
1348
1348
1349 With ``--count``, benchmark bdiffs between delta parents and self for N
1349 With ``--count``, benchmark bdiffs between delta parents and self for N
1350 revisions starting at the specified revision.
1350 revisions starting at the specified revision.
1351
1351
1352 With ``--alldata``, assume the requested revision is a changeset and
1352 With ``--alldata``, assume the requested revision is a changeset and
1353 measure bdiffs for all changes related to that changeset (manifest
1353 measure bdiffs for all changes related to that changeset (manifest
1354 and filelogs).
1354 and filelogs).
1355 """
1355 """
1356 opts = _byteskwargs(opts)
1356 opts = _byteskwargs(opts)
1357
1357
1358 if opts[b'xdiff'] and not opts[b'blocks']:
1358 if opts[b'xdiff'] and not opts[b'blocks']:
1359 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1359 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1360
1360
1361 if opts[b'alldata']:
1361 if opts[b'alldata']:
1362 opts[b'changelog'] = True
1362 opts[b'changelog'] = True
1363
1363
1364 if opts.get(b'changelog') or opts.get(b'manifest'):
1364 if opts.get(b'changelog') or opts.get(b'manifest'):
1365 file_, rev = None, file_
1365 file_, rev = None, file_
1366 elif rev is None:
1366 elif rev is None:
1367 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1367 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1368
1368
1369 blocks = opts[b'blocks']
1369 blocks = opts[b'blocks']
1370 xdiff = opts[b'xdiff']
1370 xdiff = opts[b'xdiff']
1371 textpairs = []
1371 textpairs = []
1372
1372
1373 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1373 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1374
1374
1375 startrev = r.rev(r.lookup(rev))
1375 startrev = r.rev(r.lookup(rev))
1376 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1376 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1377 if opts[b'alldata']:
1377 if opts[b'alldata']:
1378 # Load revisions associated with changeset.
1378 # Load revisions associated with changeset.
1379 ctx = repo[rev]
1379 ctx = repo[rev]
1380 mtext = _manifestrevision(repo, ctx.manifestnode())
1380 mtext = _manifestrevision(repo, ctx.manifestnode())
1381 for pctx in ctx.parents():
1381 for pctx in ctx.parents():
1382 pman = _manifestrevision(repo, pctx.manifestnode())
1382 pman = _manifestrevision(repo, pctx.manifestnode())
1383 textpairs.append((pman, mtext))
1383 textpairs.append((pman, mtext))
1384
1384
1385 # Load filelog revisions by iterating manifest delta.
1385 # Load filelog revisions by iterating manifest delta.
1386 man = ctx.manifest()
1386 man = ctx.manifest()
1387 pman = ctx.p1().manifest()
1387 pman = ctx.p1().manifest()
1388 for filename, change in pman.diff(man).items():
1388 for filename, change in pman.diff(man).items():
1389 fctx = repo.file(filename)
1389 fctx = repo.file(filename)
1390 f1 = fctx.revision(change[0][0] or -1)
1390 f1 = fctx.revision(change[0][0] or -1)
1391 f2 = fctx.revision(change[1][0] or -1)
1391 f2 = fctx.revision(change[1][0] or -1)
1392 textpairs.append((f1, f2))
1392 textpairs.append((f1, f2))
1393 else:
1393 else:
1394 dp = r.deltaparent(rev)
1394 dp = r.deltaparent(rev)
1395 textpairs.append((r.revision(dp), r.revision(rev)))
1395 textpairs.append((r.revision(dp), r.revision(rev)))
1396
1396
1397 withthreads = threads > 0
1397 withthreads = threads > 0
1398 if not withthreads:
1398 if not withthreads:
1399 def d():
1399 def d():
1400 for pair in textpairs:
1400 for pair in textpairs:
1401 if xdiff:
1401 if xdiff:
1402 mdiff.bdiff.xdiffblocks(*pair)
1402 mdiff.bdiff.xdiffblocks(*pair)
1403 elif blocks:
1403 elif blocks:
1404 mdiff.bdiff.blocks(*pair)
1404 mdiff.bdiff.blocks(*pair)
1405 else:
1405 else:
1406 mdiff.textdiff(*pair)
1406 mdiff.textdiff(*pair)
1407 else:
1407 else:
1408 q = queue()
1408 q = queue()
1409 for i in _xrange(threads):
1409 for i in _xrange(threads):
1410 q.put(None)
1410 q.put(None)
1411 ready = threading.Condition()
1411 ready = threading.Condition()
1412 done = threading.Event()
1412 done = threading.Event()
1413 for i in _xrange(threads):
1413 for i in _xrange(threads):
1414 threading.Thread(target=_bdiffworker,
1414 threading.Thread(target=_bdiffworker,
1415 args=(q, blocks, xdiff, ready, done)).start()
1415 args=(q, blocks, xdiff, ready, done)).start()
1416 q.join()
1416 q.join()
1417 def d():
1417 def d():
1418 for pair in textpairs:
1418 for pair in textpairs:
1419 q.put(pair)
1419 q.put(pair)
1420 for i in _xrange(threads):
1420 for i in _xrange(threads):
1421 q.put(None)
1421 q.put(None)
1422 with ready:
1422 with ready:
1423 ready.notify_all()
1423 ready.notify_all()
1424 q.join()
1424 q.join()
1425 timer, fm = gettimer(ui, opts)
1425 timer, fm = gettimer(ui, opts)
1426 timer(d)
1426 timer(d)
1427 fm.end()
1427 fm.end()
1428
1428
1429 if withthreads:
1429 if withthreads:
1430 done.set()
1430 done.set()
1431 for i in _xrange(threads):
1431 for i in _xrange(threads):
1432 q.put(None)
1432 q.put(None)
1433 with ready:
1433 with ready:
1434 ready.notify_all()
1434 ready.notify_all()
1435
1435
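# Editorial sketch (not part of this changeset): in --threads mode the workers
# above are started before timing and block on a queue, so only feeding and
# joining the work items is measured. The same producer/consumer shape with
# the standard library only, using a stand-in for mdiff.textdiff:

import queue as _queue

def _worker(q):
    while True:
        pair = q.get()
        if pair is None:              # sentinel: stop this worker
            q.task_done()
            return
        a, b = pair
        len(a) - len(b)               # stand-in for mdiff.textdiff(a, b)
        q.task_done()

_q = _queue.Queue()
for _ in range(2):
    threading.Thread(target=_worker, args=(_q,)).start()
for pair in [(b'a\n', b'b\n'), (b'x\n', b'x\ny\n')]:
    _q.put(pair)
for _ in range(2):
    _q.put(None)
_q.join()                             # returns once every item is processed
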
1436 @command(b'perfunidiff', revlogopts + formatteropts + [
1436 @command(b'perfunidiff', revlogopts + formatteropts + [
1437 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1437 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1438 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1438 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1439 ], b'-c|-m|FILE REV')
1439 ], b'-c|-m|FILE REV')
1440 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1440 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1441 """benchmark a unified diff between revisions
1441 """benchmark a unified diff between revisions
1442
1442
1443 This doesn't include any copy tracing - it's just a unified diff
1443 This doesn't include any copy tracing - it's just a unified diff
1444 of the texts.
1444 of the texts.
1445
1445
1446     By default, benchmark a diff between the given revision and its delta parent.
1446     By default, benchmark a diff between the given revision and its delta parent.
1447
1447
1448 With ``--count``, benchmark diffs between delta parents and self for N
1448 With ``--count``, benchmark diffs between delta parents and self for N
1449 revisions starting at the specified revision.
1449 revisions starting at the specified revision.
1450
1450
1451 With ``--alldata``, assume the requested revision is a changeset and
1451 With ``--alldata``, assume the requested revision is a changeset and
1452 measure diffs for all changes related to that changeset (manifest
1452 measure diffs for all changes related to that changeset (manifest
1453 and filelogs).
1453 and filelogs).
1454 """
1454 """
1455 opts = _byteskwargs(opts)
1455 opts = _byteskwargs(opts)
1456 if opts[b'alldata']:
1456 if opts[b'alldata']:
1457 opts[b'changelog'] = True
1457 opts[b'changelog'] = True
1458
1458
1459 if opts.get(b'changelog') or opts.get(b'manifest'):
1459 if opts.get(b'changelog') or opts.get(b'manifest'):
1460 file_, rev = None, file_
1460 file_, rev = None, file_
1461 elif rev is None:
1461 elif rev is None:
1462 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1462 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1463
1463
1464 textpairs = []
1464 textpairs = []
1465
1465
1466 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1466 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1467
1467
1468 startrev = r.rev(r.lookup(rev))
1468 startrev = r.rev(r.lookup(rev))
1469 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1469 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1470 if opts[b'alldata']:
1470 if opts[b'alldata']:
1471 # Load revisions associated with changeset.
1471 # Load revisions associated with changeset.
1472 ctx = repo[rev]
1472 ctx = repo[rev]
1473 mtext = _manifestrevision(repo, ctx.manifestnode())
1473 mtext = _manifestrevision(repo, ctx.manifestnode())
1474 for pctx in ctx.parents():
1474 for pctx in ctx.parents():
1475 pman = _manifestrevision(repo, pctx.manifestnode())
1475 pman = _manifestrevision(repo, pctx.manifestnode())
1476 textpairs.append((pman, mtext))
1476 textpairs.append((pman, mtext))
1477
1477
1478 # Load filelog revisions by iterating manifest delta.
1478 # Load filelog revisions by iterating manifest delta.
1479 man = ctx.manifest()
1479 man = ctx.manifest()
1480 pman = ctx.p1().manifest()
1480 pman = ctx.p1().manifest()
1481 for filename, change in pman.diff(man).items():
1481 for filename, change in pman.diff(man).items():
1482 fctx = repo.file(filename)
1482 fctx = repo.file(filename)
1483 f1 = fctx.revision(change[0][0] or -1)
1483 f1 = fctx.revision(change[0][0] or -1)
1484 f2 = fctx.revision(change[1][0] or -1)
1484 f2 = fctx.revision(change[1][0] or -1)
1485 textpairs.append((f1, f2))
1485 textpairs.append((f1, f2))
1486 else:
1486 else:
1487 dp = r.deltaparent(rev)
1487 dp = r.deltaparent(rev)
1488 textpairs.append((r.revision(dp), r.revision(rev)))
1488 textpairs.append((r.revision(dp), r.revision(rev)))
1489
1489
1490 def d():
1490 def d():
1491 for left, right in textpairs:
1491 for left, right in textpairs:
1492 # The date strings don't matter, so we pass empty strings.
1492 # The date strings don't matter, so we pass empty strings.
1493 headerlines, hunks = mdiff.unidiff(
1493 headerlines, hunks = mdiff.unidiff(
1494 left, b'', right, b'', b'left', b'right', binary=False)
1494 left, b'', right, b'', b'left', b'right', binary=False)
1495 # consume iterators in roughly the way patch.py does
1495 # consume iterators in roughly the way patch.py does
1496 b'\n'.join(headerlines)
1496 b'\n'.join(headerlines)
1497 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1497 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1498 timer, fm = gettimer(ui, opts)
1498 timer, fm = gettimer(ui, opts)
1499 timer(d)
1499 timer(d)
1500 fm.end()
1500 fm.end()
1501
1501
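# Editorial sketch (not part of this changeset): a single uninstrumented call
# of the body timed above, using the same mdiff.unidiff invocation; texts and
# labels are invented, and the date arguments stay empty as in the benchmark.

_left = b'line one\nline two\n'
_right = b'line one\nline 2\n'
_headerlines, _hunks = mdiff.unidiff(
    _left, b'', _right, b'', b'left', b'right', binary=False)
_text = (b'\n'.join(_headerlines) +
         b''.join(b''.join(hlines) for hrange, hlines in _hunks))
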
1502 @command(b'perfdiffwd', formatteropts)
1502 @command(b'perfdiffwd', formatteropts)
1503 def perfdiffwd(ui, repo, **opts):
1503 def perfdiffwd(ui, repo, **opts):
1504 """Profile diff of working directory changes"""
1504 """Profile diff of working directory changes"""
1505 opts = _byteskwargs(opts)
1505 opts = _byteskwargs(opts)
1506 timer, fm = gettimer(ui, opts)
1506 timer, fm = gettimer(ui, opts)
1507 options = {
1507 options = {
1508 'w': 'ignore_all_space',
1508 'w': 'ignore_all_space',
1509 'b': 'ignore_space_change',
1509 'b': 'ignore_space_change',
1510 'B': 'ignore_blank_lines',
1510 'B': 'ignore_blank_lines',
1511 }
1511 }
1512
1512
1513 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1513 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1514 opts = dict((options[c], b'1') for c in diffopt)
1514 opts = dict((options[c], b'1') for c in diffopt)
1515 def d():
1515 def d():
1516 ui.pushbuffer()
1516 ui.pushbuffer()
1517 commands.diff(ui, repo, **opts)
1517 commands.diff(ui, repo, **opts)
1518 ui.popbuffer()
1518 ui.popbuffer()
1519 diffopt = diffopt.encode('ascii')
1519 diffopt = diffopt.encode('ascii')
1520 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1520 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1521 timer(d, title=title)
1521 timer(d, title=title)
1522 fm.end()
1522 fm.end()
1523
1523
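# Editorial sketch (not part of this changeset): how a short diffopt string is
# expanded into the keyword arguments passed to commands.diff() above.

_options = {
    'w': 'ignore_all_space',
    'b': 'ignore_space_change',
    'B': 'ignore_blank_lines',
}
_opts = dict((_options[c], b'1') for c in 'wB')
assert _opts == {'ignore_all_space': b'1', 'ignore_blank_lines': b'1'}
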
1524 @command(b'perfrevlogindex', revlogopts + formatteropts,
1524 @command(b'perfrevlogindex', revlogopts + formatteropts,
1525 b'-c|-m|FILE')
1525 b'-c|-m|FILE')
1526 def perfrevlogindex(ui, repo, file_=None, **opts):
1526 def perfrevlogindex(ui, repo, file_=None, **opts):
1527 """Benchmark operations against a revlog index.
1527 """Benchmark operations against a revlog index.
1528
1528
1529 This tests constructing a revlog instance, reading index data,
1529 This tests constructing a revlog instance, reading index data,
1530 parsing index data, and performing various operations related to
1530 parsing index data, and performing various operations related to
1531 index data.
1531 index data.
1532 """
1532 """
1533
1533
1534 opts = _byteskwargs(opts)
1534 opts = _byteskwargs(opts)
1535
1535
1536 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1536 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1537
1537
1538 opener = getattr(rl, 'opener') # trick linter
1538 opener = getattr(rl, 'opener') # trick linter
1539 indexfile = rl.indexfile
1539 indexfile = rl.indexfile
1540 data = opener.read(indexfile)
1540 data = opener.read(indexfile)
1541
1541
1542 header = struct.unpack(b'>I', data[0:4])[0]
1542 header = struct.unpack(b'>I', data[0:4])[0]
1543 version = header & 0xFFFF
1543 version = header & 0xFFFF
1544 if version == 1:
1544 if version == 1:
1545 revlogio = revlog.revlogio()
1545 revlogio = revlog.revlogio()
1546 inline = header & (1 << 16)
1546 inline = header & (1 << 16)
1547 else:
1547 else:
1548 raise error.Abort((b'unsupported revlog version: %d') % version)
1548 raise error.Abort((b'unsupported revlog version: %d') % version)
1549
1549
1550 rllen = len(rl)
1550 rllen = len(rl)
1551
1551
1552 node0 = rl.node(0)
1552 node0 = rl.node(0)
1553 node25 = rl.node(rllen // 4)
1553 node25 = rl.node(rllen // 4)
1554 node50 = rl.node(rllen // 2)
1554 node50 = rl.node(rllen // 2)
1555 node75 = rl.node(rllen // 4 * 3)
1555 node75 = rl.node(rllen // 4 * 3)
1556 node100 = rl.node(rllen - 1)
1556 node100 = rl.node(rllen - 1)
1557
1557
1558 allrevs = range(rllen)
1558 allrevs = range(rllen)
1559 allrevsrev = list(reversed(allrevs))
1559 allrevsrev = list(reversed(allrevs))
1560 allnodes = [rl.node(rev) for rev in range(rllen)]
1560 allnodes = [rl.node(rev) for rev in range(rllen)]
1561 allnodesrev = list(reversed(allnodes))
1561 allnodesrev = list(reversed(allnodes))
1562
1562
1563 def constructor():
1563 def constructor():
1564 revlog.revlog(opener, indexfile)
1564 revlog.revlog(opener, indexfile)
1565
1565
1566 def read():
1566 def read():
1567 with opener(indexfile) as fh:
1567 with opener(indexfile) as fh:
1568 fh.read()
1568 fh.read()
1569
1569
1570 def parseindex():
1570 def parseindex():
1571 revlogio.parseindex(data, inline)
1571 revlogio.parseindex(data, inline)
1572
1572
1573 def getentry(revornode):
1573 def getentry(revornode):
1574 index = revlogio.parseindex(data, inline)[0]
1574 index = revlogio.parseindex(data, inline)[0]
1575 index[revornode]
1575 index[revornode]
1576
1576
1577 def getentries(revs, count=1):
1577 def getentries(revs, count=1):
1578 index = revlogio.parseindex(data, inline)[0]
1578 index = revlogio.parseindex(data, inline)[0]
1579
1579
1580 for i in range(count):
1580 for i in range(count):
1581 for rev in revs:
1581 for rev in revs:
1582 index[rev]
1582 index[rev]
1583
1583
1584 def resolvenode(node):
1584 def resolvenode(node):
1585 nodemap = revlogio.parseindex(data, inline)[1]
1585 nodemap = revlogio.parseindex(data, inline)[1]
1586 # This only works for the C code.
1586 # This only works for the C code.
1587 if nodemap is None:
1587 if nodemap is None:
1588 return
1588 return
1589
1589
1590 try:
1590 try:
1591 nodemap[node]
1591 nodemap[node]
1592 except error.RevlogError:
1592 except error.RevlogError:
1593 pass
1593 pass
1594
1594
1595 def resolvenodes(nodes, count=1):
1595 def resolvenodes(nodes, count=1):
1596 nodemap = revlogio.parseindex(data, inline)[1]
1596 nodemap = revlogio.parseindex(data, inline)[1]
1597 if nodemap is None:
1597 if nodemap is None:
1598 return
1598 return
1599
1599
1600 for i in range(count):
1600 for i in range(count):
1601 for node in nodes:
1601 for node in nodes:
1602 try:
1602 try:
1603 nodemap[node]
1603 nodemap[node]
1604 except error.RevlogError:
1604 except error.RevlogError:
1605 pass
1605 pass
1606
1606
1607 benches = [
1607 benches = [
1608 (constructor, b'revlog constructor'),
1608 (constructor, b'revlog constructor'),
1609 (read, b'read'),
1609 (read, b'read'),
1610 (parseindex, b'create index object'),
1610 (parseindex, b'create index object'),
1611 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1611 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1612 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1612 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1613 (lambda: resolvenode(node0), b'look up node at rev 0'),
1613 (lambda: resolvenode(node0), b'look up node at rev 0'),
1614 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1614 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1615 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1615 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1616 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1616 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1617 (lambda: resolvenode(node100), b'look up node at tip'),
1617 (lambda: resolvenode(node100), b'look up node at tip'),
1618 # 2x variation is to measure caching impact.
1618 # 2x variation is to measure caching impact.
1619 (lambda: resolvenodes(allnodes),
1619 (lambda: resolvenodes(allnodes),
1620 b'look up all nodes (forward)'),
1620 b'look up all nodes (forward)'),
1621 (lambda: resolvenodes(allnodes, 2),
1621 (lambda: resolvenodes(allnodes, 2),
1622 b'look up all nodes 2x (forward)'),
1622 b'look up all nodes 2x (forward)'),
1623 (lambda: resolvenodes(allnodesrev),
1623 (lambda: resolvenodes(allnodesrev),
1624 b'look up all nodes (reverse)'),
1624 b'look up all nodes (reverse)'),
1625 (lambda: resolvenodes(allnodesrev, 2),
1625 (lambda: resolvenodes(allnodesrev, 2),
1626 b'look up all nodes 2x (reverse)'),
1626 b'look up all nodes 2x (reverse)'),
1627 (lambda: getentries(allrevs),
1627 (lambda: getentries(allrevs),
1628 b'retrieve all index entries (forward)'),
1628 b'retrieve all index entries (forward)'),
1629 (lambda: getentries(allrevs, 2),
1629 (lambda: getentries(allrevs, 2),
1630 b'retrieve all index entries 2x (forward)'),
1630 b'retrieve all index entries 2x (forward)'),
1631 (lambda: getentries(allrevsrev),
1631 (lambda: getentries(allrevsrev),
1632 b'retrieve all index entries (reverse)'),
1632 b'retrieve all index entries (reverse)'),
1633 (lambda: getentries(allrevsrev, 2),
1633 (lambda: getentries(allrevsrev, 2),
1634 b'retrieve all index entries 2x (reverse)'),
1634 b'retrieve all index entries 2x (reverse)'),
1635 ]
1635 ]
1636
1636
1637 for fn, title in benches:
1637 for fn, title in benches:
1638 timer, fm = gettimer(ui, opts)
1638 timer, fm = gettimer(ui, opts)
1639 timer(fn, title=title)
1639 timer(fn, title=title)
1640 fm.end()
1640 fm.end()
1641
1641
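# Editorial sketch (not part of this changeset): the first four bytes of a
# revlog index pack the format flags in the high 16 bits and the version in
# the low 16 bits, which is what the header parsing above decodes.

_FLAG_INLINE = 1 << 16
_data = struct.pack(b'>I', _FLAG_INLINE | 1)   # an inline, version-1 header
_header = struct.unpack(b'>I', _data[0:4])[0]
assert _header & 0xFFFF == 1                   # revlog version 1
assert _header & _FLAG_INLINE                  # inline-data flag is set
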
1642 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1642 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1643 [(b'd', b'dist', 100, b'distance between the revisions'),
1643 [(b'd', b'dist', 100, b'distance between the revisions'),
1644 (b's', b'startrev', 0, b'revision to start reading at'),
1644 (b's', b'startrev', 0, b'revision to start reading at'),
1645 (b'', b'reverse', False, b'read in reverse')],
1645 (b'', b'reverse', False, b'read in reverse')],
1646 b'-c|-m|FILE')
1646 b'-c|-m|FILE')
1647 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1647 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1648 **opts):
1648 **opts):
1649 """Benchmark reading a series of revisions from a revlog.
1649 """Benchmark reading a series of revisions from a revlog.
1650
1650
1651 By default, we read every ``-d/--dist`` revision from 0 to tip of
1651 By default, we read every ``-d/--dist`` revision from 0 to tip of
1652 the specified revlog.
1652 the specified revlog.
1653
1653
1654 The start revision can be defined via ``-s/--startrev``.
1654 The start revision can be defined via ``-s/--startrev``.
1655 """
1655 """
1656 opts = _byteskwargs(opts)
1656 opts = _byteskwargs(opts)
1657
1657
1658 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1658 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1659 rllen = getlen(ui)(rl)
1659 rllen = getlen(ui)(rl)
1660
1660
1661 if startrev < 0:
1661 if startrev < 0:
1662 startrev = rllen + startrev
1662 startrev = rllen + startrev
1663
1663
1664 def d():
1664 def d():
1665 rl.clearcaches()
1665 rl.clearcaches()
1666
1666
1667 beginrev = startrev
1667 beginrev = startrev
1668 endrev = rllen
1668 endrev = rllen
1669 dist = opts[b'dist']
1669 dist = opts[b'dist']
1670
1670
1671 if reverse:
1671 if reverse:
1672 beginrev, endrev = endrev - 1, beginrev - 1
1672 beginrev, endrev = endrev - 1, beginrev - 1
1673 dist = -1 * dist
1673 dist = -1 * dist
1674
1674
1675 for x in _xrange(beginrev, endrev, dist):
1675 for x in _xrange(beginrev, endrev, dist):
1676             # Old Mercurial versions don't support passing an int here.
1676             # Old Mercurial versions don't support passing an int here.
1677 n = rl.node(x)
1677 n = rl.node(x)
1678 rl.revision(n)
1678 rl.revision(n)
1679
1679
1680 timer, fm = gettimer(ui, opts)
1680 timer, fm = gettimer(ui, opts)
1681 timer(d)
1681 timer(d)
1682 fm.end()
1682 fm.end()
1683
1683
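# Editorial sketch (not part of this changeset): the revisions actually read
# for a given --startrev/--dist, in both directions, on a toy ten-revision
# revlog, mirroring the loop bounds computed in d() above.

def _revs_to_read(rllen, startrev=0, dist=3, reverse=False):
    beginrev, endrev = startrev, rllen
    if reverse:
        beginrev, endrev = endrev - 1, beginrev - 1
        dist = -1 * dist
    return list(range(beginrev, endrev, dist))

assert _revs_to_read(10) == [0, 3, 6, 9]
assert _revs_to_read(10, reverse=True) == [9, 6, 3, 0]
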
1684 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1684 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1685 [(b's', b'startrev', 1000, b'revision to start writing at'),
1685 [(b's', b'startrev', 1000, b'revision to start writing at'),
1686 (b'', b'stoprev', -1, b'last revision to write'),
1686 (b'', b'stoprev', -1, b'last revision to write'),
1687           (b'', b'count', 3, b'number of runs to perform'),
1687           (b'', b'count', 3, b'number of runs to perform'),
1688           (b'', b'details', False, b'print timing for every revision tested'),
1688           (b'', b'details', False, b'print timing for every revision tested'),
1689           (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1689           (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1690 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1690 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1691 ],
1691 ],
1692 b'-c|-m|FILE')
1692 b'-c|-m|FILE')
1693 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1693 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1694 """Benchmark writing a series of revisions to a revlog.
1694 """Benchmark writing a series of revisions to a revlog.
1695
1695
1696 Possible source values are:
1696 Possible source values are:
1697 * `full`: add from a full text (default).
1697 * `full`: add from a full text (default).
1698 * `parent-1`: add from a delta to the first parent
1698 * `parent-1`: add from a delta to the first parent
1699 * `parent-2`: add from a delta to the second parent if it exists
1699 * `parent-2`: add from a delta to the second parent if it exists
1700 (use a delta from the first parent otherwise)
1700 (use a delta from the first parent otherwise)
1701 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1701 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1702 * `storage`: add from the existing precomputed deltas
1702 * `storage`: add from the existing precomputed deltas
1703 """
1703 """
1704 opts = _byteskwargs(opts)
1704 opts = _byteskwargs(opts)
1705
1705
1706 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1706 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1707 rllen = getlen(ui)(rl)
1707 rllen = getlen(ui)(rl)
1708 if startrev < 0:
1708 if startrev < 0:
1709 startrev = rllen + startrev
1709 startrev = rllen + startrev
1710 if stoprev < 0:
1710 if stoprev < 0:
1711 stoprev = rllen + stoprev
1711 stoprev = rllen + stoprev
1712
1712
1713 lazydeltabase = opts['lazydeltabase']
1713 lazydeltabase = opts['lazydeltabase']
1714 source = opts['source']
1714 source = opts['source']
1715 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1715 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1716 b'storage')
1716 b'storage')
1717 if source not in validsource:
1717 if source not in validsource:
1718 raise error.Abort('invalid source type: %s' % source)
1718 raise error.Abort('invalid source type: %s' % source)
1719
1719
1720 ### actually gather results
1720 ### actually gather results
1721 count = opts['count']
1721 count = opts['count']
1722 if count <= 0:
1722 if count <= 0:
1723         raise error.Abort('invalid run count: %d' % count)
1723         raise error.Abort('invalid run count: %d' % count)
1724 allresults = []
1724 allresults = []
1725 for c in range(count):
1725 for c in range(count):
1726 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1726 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1727 lazydeltabase=lazydeltabase)
1727 lazydeltabase=lazydeltabase)
1728 allresults.append(timing)
1728 allresults.append(timing)
1729
1729
1730 ### consolidate the results in a single list
1730 ### consolidate the results in a single list
1731 results = []
1731 results = []
1732 for idx, (rev, t) in enumerate(allresults[0]):
1732 for idx, (rev, t) in enumerate(allresults[0]):
1733 ts = [t]
1733 ts = [t]
1734 for other in allresults[1:]:
1734 for other in allresults[1:]:
1735 orev, ot = other[idx]
1735 orev, ot = other[idx]
1736 assert orev == rev
1736 assert orev == rev
1737 ts.append(ot)
1737 ts.append(ot)
1738 results.append((rev, ts))
1738 results.append((rev, ts))
1739 resultcount = len(results)
1739 resultcount = len(results)
1740
1740
1741 ### Compute and display relevant statistics
1741 ### Compute and display relevant statistics
1742
1742
1743 # get a formatter
1743 # get a formatter
1744 fm = ui.formatter(b'perf', opts)
1744 fm = ui.formatter(b'perf', opts)
1745 displayall = ui.configbool(b"perf", b"all-timing", False)
1745 displayall = ui.configbool(b"perf", b"all-timing", False)
1746
1746
1747 # print individual details if requested
1747 # print individual details if requested
1748 if opts['details']:
1748 if opts['details']:
1749 for idx, item in enumerate(results, 1):
1749 for idx, item in enumerate(results, 1):
1750 rev, data = item
1750 rev, data = item
1751 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1751 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1752 formatone(fm, data, title=title, displayall=displayall)
1752 formatone(fm, data, title=title, displayall=displayall)
1753
1753
1754 # sorts results by median time
1754 # sorts results by median time
1755 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1755 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1756     # list of (name, index) to display
1756     # list of (name, index) to display
1757 relevants = [
1757 relevants = [
1758 ("min", 0),
1758 ("min", 0),
1759 ("10%", resultcount * 10 // 100),
1759 ("10%", resultcount * 10 // 100),
1760 ("25%", resultcount * 25 // 100),
1760 ("25%", resultcount * 25 // 100),
1761         ("50%", resultcount * 50 // 100),
1761         ("50%", resultcount * 50 // 100),
1762 ("75%", resultcount * 75 // 100),
1762 ("75%", resultcount * 75 // 100),
1763 ("90%", resultcount * 90 // 100),
1763 ("90%", resultcount * 90 // 100),
1764 ("95%", resultcount * 95 // 100),
1764 ("95%", resultcount * 95 // 100),
1765 ("99%", resultcount * 99 // 100),
1765 ("99%", resultcount * 99 // 100),
1766 ("max", -1),
1766 ("max", -1),
1767 ]
1767 ]
1768 if not ui.quiet:
1768 if not ui.quiet:
1769 for name, idx in relevants:
1769 for name, idx in relevants:
1770 data = results[idx]
1770 data = results[idx]
1771 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1771 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1772 formatone(fm, data[1], title=title, displayall=displayall)
1772 formatone(fm, data[1], title=title, displayall=displayall)
1773
1773
1774     # XXX summing that many floats will not be very precise; we ignore this fact
1774     # XXX summing that many floats will not be very precise; we ignore this fact
1775 # for now
1775 # for now
1776 totaltime = []
1776 totaltime = []
1777 for item in allresults:
1777 for item in allresults:
1778 totaltime.append((sum(x[1][0] for x in item),
1778 totaltime.append((sum(x[1][0] for x in item),
1779 sum(x[1][1] for x in item),
1779 sum(x[1][1] for x in item),
1780 sum(x[1][2] for x in item),)
1780 sum(x[1][2] for x in item),)
1781 )
1781 )
1782 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1782 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1783 displayall=displayall)
1783 displayall=displayall)
1784 fm.end()
1784 fm.end()
1785
1785
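# Editorial sketch (not part of this changeset): the per-revision results are
# sorted by median timing and then sampled at fixed percentile indexes; the
# same index arithmetic on a toy list of two hundred fake results:

_resultcount = 200
_relevants = [('min', 0), ('50%', _resultcount * 50 // 100),
              ('90%', _resultcount * 90 // 100), ('max', -1)]
_results = [('rev%d' % i, [i / 1000.0]) for i in range(_resultcount)]
for _name, _idx in _relevants:
    _rev, _timings = _results[_idx]
    print('%s -> %s (%.3fs)' % (_name, _rev, _timings[0]))
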
1786 class _faketr(object):
1786 class _faketr(object):
1787 def add(s, x, y, z=None):
1787 def add(s, x, y, z=None):
1788 return None
1788 return None
1789
1789
1790 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1790 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1791 lazydeltabase=True):
1791 lazydeltabase=True):
1792 timings = []
1792 timings = []
1793 tr = _faketr()
1793 tr = _faketr()
1794 with _temprevlog(ui, orig, startrev) as dest:
1794 with _temprevlog(ui, orig, startrev) as dest:
1795 dest._lazydeltabase = lazydeltabase
1795 dest._lazydeltabase = lazydeltabase
1796 revs = list(orig.revs(startrev, stoprev))
1796 revs = list(orig.revs(startrev, stoprev))
1797 total = len(revs)
1797 total = len(revs)
1798 topic = 'adding'
1798 topic = 'adding'
1799 if runidx is not None:
1799 if runidx is not None:
1800 topic += ' (run #%d)' % runidx
1800 topic += ' (run #%d)' % runidx
1801 for idx, rev in enumerate(revs):
1801 for idx, rev in enumerate(revs):
1802 ui.progress(topic, idx, unit='revs', total=total)
1802 ui.progress(topic, idx, unit='revs', total=total)
1803 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1803 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1804 with timeone() as r:
1804 with timeone() as r:
1805 dest.addrawrevision(*addargs, **addkwargs)
1805 dest.addrawrevision(*addargs, **addkwargs)
1806 timings.append((rev, r[0]))
1806 timings.append((rev, r[0]))
1807 ui.progress(topic, total, unit='revs', total=total)
1807 ui.progress(topic, total, unit='revs', total=total)
1808 ui.progress(topic, None, unit='revs', total=total)
1808 ui.progress(topic, None, unit='revs', total=total)
1809 return timings
1809 return timings
1810
1810
1811 def _getrevisionseed(orig, rev, tr, source):
1811 def _getrevisionseed(orig, rev, tr, source):
1812 from mercurial.node import nullid
1812 from mercurial.node import nullid
1813
1813
1814 linkrev = orig.linkrev(rev)
1814 linkrev = orig.linkrev(rev)
1815 node = orig.node(rev)
1815 node = orig.node(rev)
1816 p1, p2 = orig.parents(node)
1816 p1, p2 = orig.parents(node)
1817 flags = orig.flags(rev)
1817 flags = orig.flags(rev)
1818 cachedelta = None
1818 cachedelta = None
1819 text = None
1819 text = None
1820
1820
1821 if source == b'full':
1821 if source == b'full':
1822 text = orig.revision(rev)
1822 text = orig.revision(rev)
1823 elif source == b'parent-1':
1823 elif source == b'parent-1':
1824 baserev = orig.rev(p1)
1824 baserev = orig.rev(p1)
1825 cachedelta = (baserev, orig.revdiff(p1, rev))
1825 cachedelta = (baserev, orig.revdiff(p1, rev))
1826 elif source == b'parent-2':
1826 elif source == b'parent-2':
1827 parent = p2
1827 parent = p2
1828 if p2 == nullid:
1828 if p2 == nullid:
1829 parent = p1
1829 parent = p1
1830 baserev = orig.rev(parent)
1830 baserev = orig.rev(parent)
1831 cachedelta = (baserev, orig.revdiff(parent, rev))
1831 cachedelta = (baserev, orig.revdiff(parent, rev))
1832 elif source == b'parent-smallest':
1832 elif source == b'parent-smallest':
1833 p1diff = orig.revdiff(p1, rev)
1833 p1diff = orig.revdiff(p1, rev)
1834 parent = p1
1834 parent = p1
1835 diff = p1diff
1835 diff = p1diff
1836 if p2 != nullid:
1836 if p2 != nullid:
1837 p2diff = orig.revdiff(p2, rev)
1837 p2diff = orig.revdiff(p2, rev)
1838 if len(p1diff) > len(p2diff):
1838 if len(p1diff) > len(p2diff):
1839 parent = p2
1839 parent = p2
1840 diff = p2diff
1840 diff = p2diff
1841 baserev = orig.rev(parent)
1841 baserev = orig.rev(parent)
1842 cachedelta = (baserev, diff)
1842 cachedelta = (baserev, diff)
1843 elif source == b'storage':
1843 elif source == b'storage':
1844 baserev = orig.deltaparent(rev)
1844 baserev = orig.deltaparent(rev)
1845 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1845 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1846
1846
1847 return ((text, tr, linkrev, p1, p2),
1847 return ((text, tr, linkrev, p1, p2),
1848 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1848 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1849
1849
1850 @contextlib.contextmanager
1850 @contextlib.contextmanager
1851 def _temprevlog(ui, orig, truncaterev):
1851 def _temprevlog(ui, orig, truncaterev):
1852 from mercurial import vfs as vfsmod
1852 from mercurial import vfs as vfsmod
1853
1853
1854 if orig._inline:
1854 if orig._inline:
1855 raise error.Abort('not supporting inline revlog (yet)')
1855 raise error.Abort('not supporting inline revlog (yet)')
1856
1856
1857 origindexpath = orig.opener.join(orig.indexfile)
1857 origindexpath = orig.opener.join(orig.indexfile)
1858 origdatapath = orig.opener.join(orig.datafile)
1858 origdatapath = orig.opener.join(orig.datafile)
1859 indexname = 'revlog.i'
1859 indexname = 'revlog.i'
1860 dataname = 'revlog.d'
1860 dataname = 'revlog.d'
1861
1861
1862 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1862 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1863 try:
1863 try:
1864 # copy the data file in a temporary directory
1864 # copy the data file in a temporary directory
1865 ui.debug('copying data in %s\n' % tmpdir)
1865 ui.debug('copying data in %s\n' % tmpdir)
1866 destindexpath = os.path.join(tmpdir, 'revlog.i')
1866 destindexpath = os.path.join(tmpdir, 'revlog.i')
1867 destdatapath = os.path.join(tmpdir, 'revlog.d')
1867 destdatapath = os.path.join(tmpdir, 'revlog.d')
1868 shutil.copyfile(origindexpath, destindexpath)
1868 shutil.copyfile(origindexpath, destindexpath)
1869 shutil.copyfile(origdatapath, destdatapath)
1869 shutil.copyfile(origdatapath, destdatapath)
1870
1870
1871 # remove the data we want to add again
1871 # remove the data we want to add again
1872 ui.debug('truncating data to be rewritten\n')
1872 ui.debug('truncating data to be rewritten\n')
1873 with open(destindexpath, 'ab') as index:
1873 with open(destindexpath, 'ab') as index:
1874 index.seek(0)
1874 index.seek(0)
1875 index.truncate(truncaterev * orig._io.size)
1875 index.truncate(truncaterev * orig._io.size)
1876 with open(destdatapath, 'ab') as data:
1876 with open(destdatapath, 'ab') as data:
1877 data.seek(0)
1877 data.seek(0)
1878 data.truncate(orig.start(truncaterev))
1878 data.truncate(orig.start(truncaterev))
1879
1879
1880 # instantiate a new revlog from the temporary copy
1880 # instantiate a new revlog from the temporary copy
1881 ui.debug('instantiating a new revlog from the truncated copy\n')
1881 ui.debug('instantiating a new revlog from the truncated copy\n')
1882 vfs = vfsmod.vfs(tmpdir)
1882 vfs = vfsmod.vfs(tmpdir)
1883 vfs.options = getattr(orig.opener, 'options', None)
1883 vfs.options = getattr(orig.opener, 'options', None)
1884
1884
1885 dest = revlog.revlog(vfs,
1885 dest = revlog.revlog(vfs,
1886 indexfile=indexname,
1886 indexfile=indexname,
1887 datafile=dataname)
1887 datafile=dataname)
1888 if dest._inline:
1888 if dest._inline:
1889 raise error.Abort('not supporting inline revlog (yet)')
1889 raise error.Abort('not supporting inline revlog (yet)')
1890 # make sure internals are initialized
1890 # make sure internals are initialized
1891 dest.revision(len(dest) - 1)
1891 dest.revision(len(dest) - 1)
1892 yield dest
1892 yield dest
1893 del dest, vfs
1893 del dest, vfs
1894 finally:
1894 finally:
1895 shutil.rmtree(tmpdir, True)
1895 shutil.rmtree(tmpdir, True)
1896
1896
1897 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1897 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1898 [(b'e', b'engines', b'', b'compression engines to use'),
1898 [(b'e', b'engines', b'', b'compression engines to use'),
1899 (b's', b'startrev', 0, b'revision to start at')],
1899 (b's', b'startrev', 0, b'revision to start at')],
1900 b'-c|-m|FILE')
1900 b'-c|-m|FILE')
1901 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1901 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1902 """Benchmark operations on revlog chunks.
1902 """Benchmark operations on revlog chunks.
1903
1903
1904 Logically, each revlog is a collection of fulltext revisions. However,
1904 Logically, each revlog is a collection of fulltext revisions. However,
1905 stored within each revlog are "chunks" of possibly compressed data. This
1905 stored within each revlog are "chunks" of possibly compressed data. This
1906 data needs to be read and decompressed or compressed and written.
1906 data needs to be read and decompressed or compressed and written.
1907
1907
1908 This command measures the time it takes to read+decompress and recompress
1908 This command measures the time it takes to read+decompress and recompress
1909 chunks in a revlog. It effectively isolates I/O and compression performance.
1909 chunks in a revlog. It effectively isolates I/O and compression performance.
1910 For measurements of higher-level operations like resolving revisions,
1910 For measurements of higher-level operations like resolving revisions,
1911 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1911 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1912 """
1912 """
1913 opts = _byteskwargs(opts)
1913 opts = _byteskwargs(opts)
1914
1914
1915 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1915 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1916
1916
1917 # _chunkraw was renamed to _getsegmentforrevs.
1917 # _chunkraw was renamed to _getsegmentforrevs.
1918 try:
1918 try:
1919 segmentforrevs = rl._getsegmentforrevs
1919 segmentforrevs = rl._getsegmentforrevs
1920 except AttributeError:
1920 except AttributeError:
1921 segmentforrevs = rl._chunkraw
1921 segmentforrevs = rl._chunkraw
1922
1922
1923 # Verify engines argument.
1923 # Verify engines argument.
1924 if engines:
1924 if engines:
1925 engines = set(e.strip() for e in engines.split(b','))
1925 engines = set(e.strip() for e in engines.split(b','))
1926 for engine in engines:
1926 for engine in engines:
1927 try:
1927 try:
1928 util.compressionengines[engine]
1928 util.compressionengines[engine]
1929 except KeyError:
1929 except KeyError:
1930 raise error.Abort(b'unknown compression engine: %s' % engine)
1930 raise error.Abort(b'unknown compression engine: %s' % engine)
1931 else:
1931 else:
1932 engines = []
1932 engines = []
1933 for e in util.compengines:
1933 for e in util.compengines:
1934 engine = util.compengines[e]
1934 engine = util.compengines[e]
1935 try:
1935 try:
1936 if engine.available():
1936 if engine.available():
1937 engine.revlogcompressor().compress(b'dummy')
1937 engine.revlogcompressor().compress(b'dummy')
1938 engines.append(e)
1938 engines.append(e)
1939 except NotImplementedError:
1939 except NotImplementedError:
1940 pass
1940 pass
1941
1941
1942 revs = list(rl.revs(startrev, len(rl) - 1))
1942 revs = list(rl.revs(startrev, len(rl) - 1))
1943
1943
1944 def rlfh(rl):
1944 def rlfh(rl):
1945 if rl._inline:
1945 if rl._inline:
1946 return getsvfs(repo)(rl.indexfile)
1946 return getsvfs(repo)(rl.indexfile)
1947 else:
1947 else:
1948 return getsvfs(repo)(rl.datafile)
1948 return getsvfs(repo)(rl.datafile)
1949
1949
1950 def doread():
1950 def doread():
1951 rl.clearcaches()
1951 rl.clearcaches()
1952 for rev in revs:
1952 for rev in revs:
1953 segmentforrevs(rev, rev)
1953 segmentforrevs(rev, rev)
1954
1954
1955 def doreadcachedfh():
1955 def doreadcachedfh():
1956 rl.clearcaches()
1956 rl.clearcaches()
1957 fh = rlfh(rl)
1957 fh = rlfh(rl)
1958 for rev in revs:
1958 for rev in revs:
1959 segmentforrevs(rev, rev, df=fh)
1959 segmentforrevs(rev, rev, df=fh)
1960
1960
1961 def doreadbatch():
1961 def doreadbatch():
1962 rl.clearcaches()
1962 rl.clearcaches()
1963 segmentforrevs(revs[0], revs[-1])
1963 segmentforrevs(revs[0], revs[-1])
1964
1964
1965 def doreadbatchcachedfh():
1965 def doreadbatchcachedfh():
1966 rl.clearcaches()
1966 rl.clearcaches()
1967 fh = rlfh(rl)
1967 fh = rlfh(rl)
1968 segmentforrevs(revs[0], revs[-1], df=fh)
1968 segmentforrevs(revs[0], revs[-1], df=fh)
1969
1969
1970 def dochunk():
1970 def dochunk():
1971 rl.clearcaches()
1971 rl.clearcaches()
1972 fh = rlfh(rl)
1972 fh = rlfh(rl)
1973 for rev in revs:
1973 for rev in revs:
1974 rl._chunk(rev, df=fh)
1974 rl._chunk(rev, df=fh)
1975
1975
1976 chunks = [None]
1976 chunks = [None]
1977
1977
1978 def dochunkbatch():
1978 def dochunkbatch():
1979 rl.clearcaches()
1979 rl.clearcaches()
1980 fh = rlfh(rl)
1980 fh = rlfh(rl)
1981 # Save chunks as a side-effect.
1981 # Save chunks as a side-effect.
1982 chunks[0] = rl._chunks(revs, df=fh)
1982 chunks[0] = rl._chunks(revs, df=fh)
1983
1983
1984 def docompress(compressor):
1984 def docompress(compressor):
1985 rl.clearcaches()
1985 rl.clearcaches()
1986
1986
1987 try:
1987 try:
1988 # Swap in the requested compression engine.
1988 # Swap in the requested compression engine.
1989 oldcompressor = rl._compressor
1989 oldcompressor = rl._compressor
1990 rl._compressor = compressor
1990 rl._compressor = compressor
1991 for chunk in chunks[0]:
1991 for chunk in chunks[0]:
1992 rl.compress(chunk)
1992 rl.compress(chunk)
1993 finally:
1993 finally:
1994 rl._compressor = oldcompressor
1994 rl._compressor = oldcompressor
1995
1995
1996 benches = [
1996 benches = [
1997 (lambda: doread(), b'read'),
1997 (lambda: doread(), b'read'),
1998 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1998 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1999 (lambda: doreadbatch(), b'read batch'),
1999 (lambda: doreadbatch(), b'read batch'),
2000 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2000 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2001 (lambda: dochunk(), b'chunk'),
2001 (lambda: dochunk(), b'chunk'),
2002 (lambda: dochunkbatch(), b'chunk batch'),
2002 (lambda: dochunkbatch(), b'chunk batch'),
2003 ]
2003 ]
2004
2004
2005 for engine in sorted(engines):
2005 for engine in sorted(engines):
2006 compressor = util.compengines[engine].revlogcompressor()
2006 compressor = util.compengines[engine].revlogcompressor()
2007 benches.append((functools.partial(docompress, compressor),
2007 benches.append((functools.partial(docompress, compressor),
2008 b'compress w/ %s' % engine))
2008 b'compress w/ %s' % engine))
2009
2009
2010 for fn, title in benches:
2010 for fn, title in benches:
2011 timer, fm = gettimer(ui, opts)
2011 timer, fm = gettimer(ui, opts)
2012 timer(fn, title=title)
2012 timer(fn, title=title)
2013 fm.end()
2013 fm.end()
2014
2014
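As a rough, hedged illustration of what the 'read'/'chunk' and 'compress w/ <engine>' timings above exercise, the sketch below does a single pass over a revlog's chunks outside the perf harness. It assumes `rl` is an already-open revlog object and reuses only calls shown above; the helper name `timechunks` and the default engine are invented for this example.

import time

from mercurial import util

def timechunks(rl, engine=b'zlib'):
    """Illustrative only: read, decompress and recompress every chunk once."""
    revs = list(rl.revs(0, len(rl) - 1))
    rl.clearcaches()
    start = time.time()
    chunks = rl._chunks(revs)          # read + decompress in one batch
    readtime = time.time() - start
    compressor = util.compengines[engine].revlogcompressor()
    start = time.time()
    for chunk in chunks:
        compressor.compress(chunk)     # recompress with the chosen engine
    comptime = time.time() - start
    return readtime, comptime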
2015 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2015 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2016 [(b'', b'cache', False, b'use caches instead of clearing')],
2016 [(b'', b'cache', False, b'use caches instead of clearing')],
2017 b'-c|-m|FILE REV')
2017 b'-c|-m|FILE REV')
2018 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2018 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2019 """Benchmark obtaining a revlog revision.
2019 """Benchmark obtaining a revlog revision.
2020
2020
2021 Obtaining a revlog revision consists of roughly the following steps:
2021 Obtaining a revlog revision consists of roughly the following steps:
2022
2022
2023 1. Compute the delta chain
2023 1. Compute the delta chain
2024 2. Slice the delta chain if applicable
2024 2. Slice the delta chain if applicable
2025 3. Obtain the raw chunks for that delta chain
2025 3. Obtain the raw chunks for that delta chain
2026 4. Decompress each raw chunk
2026 4. Decompress each raw chunk
2027 5. Apply binary patches to obtain fulltext
2027 5. Apply binary patches to obtain fulltext
2028 6. Verify hash of fulltext
2028 6. Verify hash of fulltext
2029
2029
2030 This command measures the time spent in each of these phases.
2030 This command measures the time spent in each of these phases.
2031 """
2031 """
2032 opts = _byteskwargs(opts)
2032 opts = _byteskwargs(opts)
2033
2033
2034 if opts.get(b'changelog') or opts.get(b'manifest'):
2034 if opts.get(b'changelog') or opts.get(b'manifest'):
2035 file_, rev = None, file_
2035 file_, rev = None, file_
2036 elif rev is None:
2036 elif rev is None:
2037 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2037 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2038
2038
2039 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2039 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2040
2040
2041 # _chunkraw was renamed to _getsegmentforrevs.
2041 # _chunkraw was renamed to _getsegmentforrevs.
2042 try:
2042 try:
2043 segmentforrevs = r._getsegmentforrevs
2043 segmentforrevs = r._getsegmentforrevs
2044 except AttributeError:
2044 except AttributeError:
2045 segmentforrevs = r._chunkraw
2045 segmentforrevs = r._chunkraw
2046
2046
2047 node = r.lookup(rev)
2047 node = r.lookup(rev)
2048 rev = r.rev(node)
2048 rev = r.rev(node)
2049
2049
2050 def getrawchunks(data, chain):
2050 def getrawchunks(data, chain):
2051 start = r.start
2051 start = r.start
2052 length = r.length
2052 length = r.length
2053 inline = r._inline
2053 inline = r._inline
2054 iosize = r._io.size
2054 iosize = r._io.size
2055 buffer = util.buffer
2055 buffer = util.buffer
2056
2056
2057 chunks = []
2057 chunks = []
2058 ladd = chunks.append
2058 ladd = chunks.append
2059 for idx, item in enumerate(chain):
2059 for idx, item in enumerate(chain):
2060 offset = start(item[0])
2060 offset = start(item[0])
2061 bits = data[idx]
2061 bits = data[idx]
2062 for rev in item:
2062 for rev in item:
2063 chunkstart = start(rev)
2063 chunkstart = start(rev)
2064 if inline:
2064 if inline:
2065 chunkstart += (rev + 1) * iosize
2065 chunkstart += (rev + 1) * iosize
2066 chunklength = length(rev)
2066 chunklength = length(rev)
2067 ladd(buffer(bits, chunkstart - offset, chunklength))
2067 ladd(buffer(bits, chunkstart - offset, chunklength))
2068
2068
2069 return chunks
2069 return chunks
2070
2070
2071 def dodeltachain(rev):
2071 def dodeltachain(rev):
2072 if not cache:
2072 if not cache:
2073 r.clearcaches()
2073 r.clearcaches()
2074 r._deltachain(rev)
2074 r._deltachain(rev)
2075
2075
2076 def doread(chain):
2076 def doread(chain):
2077 if not cache:
2077 if not cache:
2078 r.clearcaches()
2078 r.clearcaches()
2079 for item in slicedchain:
2079 for item in slicedchain:
2080 segmentforrevs(item[0], item[-1])
2080 segmentforrevs(item[0], item[-1])
2081
2081
2082 def doslice(r, chain, size):
2082 def doslice(r, chain, size):
2083 for s in slicechunk(r, chain, targetsize=size):
2083 for s in slicechunk(r, chain, targetsize=size):
2084 pass
2084 pass
2085
2085
2086 def dorawchunks(data, chain):
2086 def dorawchunks(data, chain):
2087 if not cache:
2087 if not cache:
2088 r.clearcaches()
2088 r.clearcaches()
2089 getrawchunks(data, chain)
2089 getrawchunks(data, chain)
2090
2090
2091 def dodecompress(chunks):
2091 def dodecompress(chunks):
2092 decomp = r.decompress
2092 decomp = r.decompress
2093 for chunk in chunks:
2093 for chunk in chunks:
2094 decomp(chunk)
2094 decomp(chunk)
2095
2095
2096 def dopatch(text, bins):
2096 def dopatch(text, bins):
2097 if not cache:
2097 if not cache:
2098 r.clearcaches()
2098 r.clearcaches()
2099 mdiff.patches(text, bins)
2099 mdiff.patches(text, bins)
2100
2100
2101 def dohash(text):
2101 def dohash(text):
2102 if not cache:
2102 if not cache:
2103 r.clearcaches()
2103 r.clearcaches()
2104 r.checkhash(text, node, rev=rev)
2104 r.checkhash(text, node, rev=rev)
2105
2105
2106 def dorevision():
2106 def dorevision():
2107 if not cache:
2107 if not cache:
2108 r.clearcaches()
2108 r.clearcaches()
2109 r.revision(node)
2109 r.revision(node)
2110
2110
2111 try:
2111 try:
2112 from mercurial.revlogutils.deltas import slicechunk
2112 from mercurial.revlogutils.deltas import slicechunk
2113 except ImportError:
2113 except ImportError:
2114 slicechunk = getattr(revlog, '_slicechunk', None)
2114 slicechunk = getattr(revlog, '_slicechunk', None)
2115
2115
2116 size = r.length(rev)
2116 size = r.length(rev)
2117 chain = r._deltachain(rev)[0]
2117 chain = r._deltachain(rev)[0]
2118 if not getattr(r, '_withsparseread', False):
2118 if not getattr(r, '_withsparseread', False):
2119 slicedchain = (chain,)
2119 slicedchain = (chain,)
2120 else:
2120 else:
2121 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2121 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2122 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2122 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2123 rawchunks = getrawchunks(data, slicedchain)
2123 rawchunks = getrawchunks(data, slicedchain)
2124 bins = r._chunks(chain)
2124 bins = r._chunks(chain)
2125 text = bytes(bins[0])
2125 text = bytes(bins[0])
2126 bins = bins[1:]
2126 bins = bins[1:]
2127 text = mdiff.patches(text, bins)
2127 text = mdiff.patches(text, bins)
2128
2128
2129 benches = [
2129 benches = [
2130 (lambda: dorevision(), b'full'),
2130 (lambda: dorevision(), b'full'),
2131 (lambda: dodeltachain(rev), b'deltachain'),
2131 (lambda: dodeltachain(rev), b'deltachain'),
2132 (lambda: doread(chain), b'read'),
2132 (lambda: doread(chain), b'read'),
2133 ]
2133 ]
2134
2134
2135 if getattr(r, '_withsparseread', False):
2135 if getattr(r, '_withsparseread', False):
2136 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2136 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2137 benches.append(slicing)
2137 benches.append(slicing)
2138
2138
2139 benches.extend([
2139 benches.extend([
2140 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2140 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2141 (lambda: dodecompress(rawchunks), b'decompress'),
2141 (lambda: dodecompress(rawchunks), b'decompress'),
2142 (lambda: dopatch(text, bins), b'patch'),
2142 (lambda: dopatch(text, bins), b'patch'),
2143 (lambda: dohash(text), b'hash'),
2143 (lambda: dohash(text), b'hash'),
2144 ])
2144 ])
2145
2145
2146 timer, fm = gettimer(ui, opts)
2146 timer, fm = gettimer(ui, opts)
2147 for fn, title in benches:
2147 for fn, title in benches:
2148 timer(fn, title=title)
2148 timer(fn, title=title)
2149 fm.end()
2149 fm.end()
2150
2150
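To make the six phases listed in the docstring above concrete, here is a minimal, hedged sketch that resolves a single revision step by step (the optional slicing phase is skipped). It reuses only revlog calls that appear in the command body; the name `resolveslowly` is illustrative, not part of perf.py.

from mercurial import mdiff

def resolveslowly(r, rev):
    """Illustrative only: rebuild a fulltext the way r.revision(rev) does."""
    chain = r._deltachain(rev)[0]            # 1. compute the delta chain
    bins = r._chunks(chain)                  # 3.+4. read and decompress chunks
    text = bytes(bins[0])                    # base fulltext of the chain
    text = mdiff.patches(text, bins[1:])     # 5. apply the binary patches
    r.checkhash(text, r.node(rev), rev=rev)  # 6. verify the fulltext hash
    return text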
2151 @command(b'perfrevset',
2151 @command(b'perfrevset',
2152 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2152 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2153 (b'', b'contexts', False, b'obtain changectx for each revision')]
2153 (b'', b'contexts', False, b'obtain changectx for each revision')]
2154 + formatteropts, b"REVSET")
2154 + formatteropts, b"REVSET")
2155 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2155 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2156 """benchmark the execution time of a revset
2156 """benchmark the execution time of a revset
2157
2157
2158 Use the --clear option if you need to evaluate the impact of building the
2158 Use the --clear option if you need to evaluate the impact of building the
2159 volatile revision set cache on revset execution. The volatile cache holds
2159 volatile revision set cache on revset execution. The volatile cache holds
2160 filtered and obsolescence-related data."""
2160 filtered and obsolescence-related data."""
2161 opts = _byteskwargs(opts)
2161 opts = _byteskwargs(opts)
2162
2162
2163 timer, fm = gettimer(ui, opts)
2163 timer, fm = gettimer(ui, opts)
2164 def d():
2164 def d():
2165 if clear:
2165 if clear:
2166 repo.invalidatevolatilesets()
2166 repo.invalidatevolatilesets()
2167 if contexts:
2167 if contexts:
2168 for ctx in repo.set(expr): pass
2168 for ctx in repo.set(expr): pass
2169 else:
2169 else:
2170 for r in repo.revs(expr): pass
2170 for r in repo.revs(expr): pass
2171 timer(d)
2171 timer(d)
2172 fm.end()
2172 fm.end()
2173
2173
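For a quick one-off measurement outside the perf harness, the following hedged sketch times a revset with plain stdlib timing. It assumes `repo` is an open repository object; `timerevset` is an invented helper name.

import time

def timerevset(repo, expr, clear=False, runs=3):
    """Illustrative only: time evaluating a revset a few times."""
    best = None
    for _ in range(runs):
        if clear:
            repo.invalidatevolatilesets()  # drop filtered/obsolete caches
        start = time.time()
        for r in repo.revs(expr):          # iterate to force full evaluation
            pass
        elapsed = time.time() - start
        if best is None or elapsed < best:
            best = elapsed
    return best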
2174 @command(b'perfvolatilesets',
2174 @command(b'perfvolatilesets',
2175 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2175 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2176 ] + formatteropts)
2176 ] + formatteropts)
2177 def perfvolatilesets(ui, repo, *names, **opts):
2177 def perfvolatilesets(ui, repo, *names, **opts):
2178 """benchmark the computation of various volatile set
2178 """benchmark the computation of various volatile set
2179
2179
2180 Volatile set computes element related to filtering and obsolescence."""
2180 Volatile set computes element related to filtering and obsolescence."""
2181 opts = _byteskwargs(opts)
2181 opts = _byteskwargs(opts)
2182 timer, fm = gettimer(ui, opts)
2182 timer, fm = gettimer(ui, opts)
2183 repo = repo.unfiltered()
2183 repo = repo.unfiltered()
2184
2184
2185 def getobs(name):
2185 def getobs(name):
2186 def d():
2186 def d():
2187 repo.invalidatevolatilesets()
2187 repo.invalidatevolatilesets()
2188 if opts[b'clear_obsstore']:
2188 if opts[b'clear_obsstore']:
2189 clearfilecache(repo, b'obsstore')
2189 clearfilecache(repo, b'obsstore')
2190 obsolete.getrevs(repo, name)
2190 obsolete.getrevs(repo, name)
2191 return d
2191 return d
2192
2192
2193 allobs = sorted(obsolete.cachefuncs)
2193 allobs = sorted(obsolete.cachefuncs)
2194 if names:
2194 if names:
2195 allobs = [n for n in allobs if n in names]
2195 allobs = [n for n in allobs if n in names]
2196
2196
2197 for name in allobs:
2197 for name in allobs:
2198 timer(getobs(name), title=name)
2198 timer(getobs(name), title=name)
2199
2199
2200 def getfiltered(name):
2200 def getfiltered(name):
2201 def d():
2201 def d():
2202 repo.invalidatevolatilesets()
2202 repo.invalidatevolatilesets()
2203 if opts[b'clear_obsstore']:
2203 if opts[b'clear_obsstore']:
2204 clearfilecache(repo, b'obsstore')
2204 clearfilecache(repo, b'obsstore')
2205 repoview.filterrevs(repo, name)
2205 repoview.filterrevs(repo, name)
2206 return d
2206 return d
2207
2207
2208 allfilter = sorted(repoview.filtertable)
2208 allfilter = sorted(repoview.filtertable)
2209 if names:
2209 if names:
2210 allfilter = [n for n in allfilter if n in names]
2210 allfilter = [n for n in allfilter if n in names]
2211
2211
2212 for name in allfilter:
2212 for name in allfilter:
2213 timer(getfiltered(name), title=name)
2213 timer(getfiltered(name), title=name)
2214 fm.end()
2214 fm.end()
2215
2215
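A hedged sketch of what a full run of the command above amounts to: recompute each obsolescence set and each repoview filter once after invalidating the volatile caches. It assumes `repo` is an open repository object; `computeonce` is an invented name.

from mercurial import obsolete, repoview

def computeonce(repo):
    """Illustrative only: recompute every volatile set once."""
    repo = repo.unfiltered()
    repo.invalidatevolatilesets()                # force recomputation
    obssizes = {}
    for name in sorted(obsolete.cachefuncs):     # obsolete, suspended, ...
        obssizes[name] = len(obsolete.getrevs(repo, name))
    filtersizes = {}
    for name in sorted(repoview.filtertable):    # visible, served, ...
        filtersizes[name] = len(repoview.filterrevs(repo, name))
    return obssizes, filtersizes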
2216 @command(b'perfbranchmap',
2216 @command(b'perfbranchmap',
2217 [(b'f', b'full', False,
2217 [(b'f', b'full', False,
2218 b'Includes build time of subset'),
2218 b'Includes build time of subset'),
2219 (b'', b'clear-revbranch', False,
2219 (b'', b'clear-revbranch', False,
2220 b'purge the revbranch cache between computation'),
2220 b'purge the revbranch cache between computation'),
2221 ] + formatteropts)
2221 ] + formatteropts)
2222 def perfbranchmap(ui, repo, *filternames, **opts):
2222 def perfbranchmap(ui, repo, *filternames, **opts):
2223 """benchmark the update of a branchmap
2223 """benchmark the update of a branchmap
2224
2224
2225 This benchmarks the full repo.branchmap() call with read and write disabled
2225 This benchmarks the full repo.branchmap() call with read and write disabled
2226 """
2226 """
2227 opts = _byteskwargs(opts)
2227 opts = _byteskwargs(opts)
2228 full = opts.get(b"full", False)
2228 full = opts.get(b"full", False)
2229 clear_revbranch = opts.get(b"clear_revbranch", False)
2229 clear_revbranch = opts.get(b"clear_revbranch", False)
2230 timer, fm = gettimer(ui, opts)
2230 timer, fm = gettimer(ui, opts)
2231 def getbranchmap(filtername):
2231 def getbranchmap(filtername):
2232 """generate a benchmark function for the filtername"""
2232 """generate a benchmark function for the filtername"""
2233 if filtername is None:
2233 if filtername is None:
2234 view = repo
2234 view = repo
2235 else:
2235 else:
2236 view = repo.filtered(filtername)
2236 view = repo.filtered(filtername)
2237 def d():
2237 def d():
2238 if clear_revbranch:
2238 if clear_revbranch:
2239 repo.revbranchcache()._clear()
2239 repo.revbranchcache()._clear()
2240 if full:
2240 if full:
2241 view._branchcaches.clear()
2241 view._branchcaches.clear()
2242 else:
2242 else:
2243 view._branchcaches.pop(filtername, None)
2243 view._branchcaches.pop(filtername, None)
2244 view.branchmap()
2244 view.branchmap()
2245 return d
2245 return d
2246 # add filter in smaller subset to bigger subset
2246 # add filter in smaller subset to bigger subset
2247 possiblefilters = set(repoview.filtertable)
2247 possiblefilters = set(repoview.filtertable)
2248 if filternames:
2248 if filternames:
2249 possiblefilters &= set(filternames)
2249 possiblefilters &= set(filternames)
2250 subsettable = getbranchmapsubsettable()
2250 subsettable = getbranchmapsubsettable()
2251 allfilters = []
2251 allfilters = []
2252 while possiblefilters:
2252 while possiblefilters:
2253 for name in possiblefilters:
2253 for name in possiblefilters:
2254 subset = subsettable.get(name)
2254 subset = subsettable.get(name)
2255 if subset not in possiblefilters:
2255 if subset not in possiblefilters:
2256 break
2256 break
2257 else:
2257 else:
2258 assert False, b'subset cycle %s!' % possiblefilters
2258 assert False, b'subset cycle %s!' % possiblefilters
2259 allfilters.append(name)
2259 allfilters.append(name)
2260 possiblefilters.remove(name)
2260 possiblefilters.remove(name)
2261
2261
2262 # warm the cache
2262 # warm the cache
2263 if not full:
2263 if not full:
2264 for name in allfilters:
2264 for name in allfilters:
2265 repo.filtered(name).branchmap()
2265 repo.filtered(name).branchmap()
2266 if not filternames or b'unfiltered' in filternames:
2266 if not filternames or b'unfiltered' in filternames:
2267 # add unfiltered
2267 # add unfiltered
2268 allfilters.append(None)
2268 allfilters.append(None)
2269
2269
2270 branchcacheread = safeattrsetter(branchmap, b'read')
2270 branchcacheread = safeattrsetter(branchmap, b'read')
2271 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2271 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2272 branchcacheread.set(lambda repo: None)
2272 branchcacheread.set(lambda repo: None)
2273 branchcachewrite.set(lambda bc, repo: None)
2273 branchcachewrite.set(lambda bc, repo: None)
2274 try:
2274 try:
2275 for name in allfilters:
2275 for name in allfilters:
2276 printname = name
2276 printname = name
2277 if name is None:
2277 if name is None:
2278 printname = b'unfiltered'
2278 printname = b'unfiltered'
2279 timer(getbranchmap(name), title=str(printname))
2279 timer(getbranchmap(name), title=str(printname))
2280 finally:
2280 finally:
2281 branchcacheread.restore()
2281 branchcacheread.restore()
2282 branchcachewrite.restore()
2282 branchcachewrite.restore()
2283 fm.end()
2283 fm.end()
2284
2284
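Each timed run above boils down to forgetting one cached branchmap and rebuilding it from the changelog. Below is a hedged sketch of that single step, assuming `repo` is an open repository; the filter name `b'served'` is only an example and `rebuildbranchmap` is an invented name.

def rebuildbranchmap(repo, filtername=b'served'):
    """Illustrative only: drop and recompute one filtered branchmap."""
    view = repo.filtered(filtername) if filtername else repo
    view._branchcaches.pop(filtername, None)   # forget the cached copy
    return view.branchmap()                    # recompute from the changelog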
2285 @command(b'perfbranchmapupdate', [
2286 (b'', b'base', [], b'subset of revision to start from'),
2287 (b'', b'target', [], b'subset of revision to end with'),
2288 ] + formatteropts)
2289 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2290 """benchmark branchmap update from for <base> revs to <target> revs
2291
2292 Examples:
2293
2294 # update for the last revision
2295 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2296
2297 # update for a change coming with a new branch
2298 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2299 """
2300 from mercurial import branchmap
2301 opts = _byteskwargs(opts)
2302 timer, fm = gettimer(ui, opts)
2303 x = [None] # used to pass data between closures
2304
2305 # we use a `list` here to avoid possible side effects from the smartset
2306 baserevs = list(scmutil.revrange(repo, base))
2307 targetrevs = list(scmutil.revrange(repo, target))
2308 if not baserevs:
2309 raise error.Abort(b'no revisions selected for --base')
2310 if not targetrevs:
2311 raise error.Abort(b'no revisions selected for --target')
2312
2313 # make sure the target revisions are a superset of the base revisions
2314 targetrevs = list(set(baserevs) | set(targetrevs))
2315 targetrevs.sort()
2316
2317 cl = repo.changelog
2318 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2319 allbaserevs.sort()
2320 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2321
2322 newrevs = list(alltargetrevs.difference(allbaserevs))
2323 newrevs.sort()
2324
2325 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2326 ui.status(msg % (len(allbaserevs), len(newrevs)))
2327
2328 base = branchmap.branchcache()
2329 base.update(repo, allbaserevs)
2330
2331 def setup():
2332 x[0] = base.copy()
2333
2334 def bench():
2335 x[0].update(repo, newrevs)
2336
2337 timer(bench, setup=setup)
2338 fm.end()
2339
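The new command above times only the incremental `update()` call. The following hedged sketch reproduces one such run end to end, reusing the branchmap and revrange calls from the command body; the revset strings and the helper name `timebranchmapupdate` are illustrative.

import time

from mercurial import branchmap, scmutil

def timebranchmapupdate(repo, basespec=b'not tip', targetspec=b'tip'):
    """Illustrative only: time one incremental branchmap update."""
    cl = repo.changelog
    baserevs = list(scmutil.revrange(repo, [basespec]))
    targetrevs = list(scmutil.revrange(repo, [targetspec]))
    allbase = sorted(cl.ancestors(baserevs, inclusive=True))
    newrevs = sorted(frozenset(cl.ancestors(targetrevs, inclusive=True))
                     - set(allbase))
    base = branchmap.branchcache()
    base.update(repo, allbase)    # warm a branchmap covering the base revs
    work = base.copy()            # keep the warm base intact between runs
    start = time.time()
    work.update(repo, newrevs)    # the step the command benchmarks
    return time.time() - start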
2285 @command(b'perfbranchmapload', [
2340 @command(b'perfbranchmapload', [
2286 (b'f', b'filter', b'', b'Specify repoview filter'),
2341 (b'f', b'filter', b'', b'Specify repoview filter'),
2287 (b'', b'list', False, b'List branchmap filter caches'),
2342 (b'', b'list', False, b'List branchmap filter caches'),
2288 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2343 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2289
2344
2290 ] + formatteropts)
2345 ] + formatteropts)
2291 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2346 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2292 """benchmark reading the branchmap"""
2347 """benchmark reading the branchmap"""
2293 opts = _byteskwargs(opts)
2348 opts = _byteskwargs(opts)
2294 clearrevlogs = opts[b'clear_revlogs']
2349 clearrevlogs = opts[b'clear_revlogs']
2295
2350
2296 if list:
2351 if list:
2297 for name, kind, st in repo.cachevfs.readdir(stat=True):
2352 for name, kind, st in repo.cachevfs.readdir(stat=True):
2298 if name.startswith(b'branch2'):
2353 if name.startswith(b'branch2'):
2299 filtername = name.partition(b'-')[2] or b'unfiltered'
2354 filtername = name.partition(b'-')[2] or b'unfiltered'
2300 ui.status(b'%s - %s\n'
2355 ui.status(b'%s - %s\n'
2301 % (filtername, util.bytecount(st.st_size)))
2356 % (filtername, util.bytecount(st.st_size)))
2302 return
2357 return
2303 if not filter:
2358 if not filter:
2304 filter = None
2359 filter = None
2305 subsettable = getbranchmapsubsettable()
2360 subsettable = getbranchmapsubsettable()
2306 if filter is None:
2361 if filter is None:
2307 repo = repo.unfiltered()
2362 repo = repo.unfiltered()
2308 else:
2363 else:
2309 repo = repoview.repoview(repo, filter)
2364 repo = repoview.repoview(repo, filter)
2310
2365
2311 repo.branchmap() # make sure we have a relevant, up to date branchmap
2366 repo.branchmap() # make sure we have a relevant, up to date branchmap
2312
2367
2313 currentfilter = filter
2368 currentfilter = filter
2314 # try once without timer, the filter may not be cached
2369 # try once without timer, the filter may not be cached
2315 while branchmap.read(repo) is None:
2370 while branchmap.read(repo) is None:
2316 currentfilter = subsettable.get(currentfilter)
2371 currentfilter = subsettable.get(currentfilter)
2317 if currentfilter is None:
2372 if currentfilter is None:
2318 raise error.Abort(b'No branchmap cached for %s repo'
2373 raise error.Abort(b'No branchmap cached for %s repo'
2319 % (filter or b'unfiltered'))
2374 % (filter or b'unfiltered'))
2320 repo = repo.filtered(currentfilter)
2375 repo = repo.filtered(currentfilter)
2321 timer, fm = gettimer(ui, opts)
2376 timer, fm = gettimer(ui, opts)
2322 def setup():
2377 def setup():
2323 if clearrevlogs:
2378 if clearrevlogs:
2324 clearchangelog(repo)
2379 clearchangelog(repo)
2325 def bench():
2380 def bench():
2326 branchmap.read(repo)
2381 branchmap.read(repo)
2327 timer(bench, setup=setup)
2382 timer(bench, setup=setup)
2328 fm.end()
2383 fm.end()
2329
2384
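The loop above walks `subsettable` until it finds a filter whose branchmap cache file actually exists on disk. Below is a hedged sketch of that lookup in isolation, assuming `repo` is an open repository and `subsettable` is the mapping returned by `getbranchmapsubsettable()`; `loadcachedbranchmap` is an invented name.

from mercurial import branchmap

def loadcachedbranchmap(repo, subsettable, filtername=None):
    """Illustrative only: find the nearest filter with an on-disk branchmap."""
    current = filtername
    view = repo.unfiltered() if filtername is None else repo.filtered(filtername)
    while branchmap.read(view) is None:        # no cache file for this filter
        current = subsettable.get(current)     # fall back to the next subset
        if current is None:
            raise LookupError('no branchmap cached')
        view = repo.filtered(current)
    return branchmap.read(view)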
2330 @command(b'perfloadmarkers')
2385 @command(b'perfloadmarkers')
2331 def perfloadmarkers(ui, repo):
2386 def perfloadmarkers(ui, repo):
2332 """benchmark the time to parse the on-disk markers for a repo
2387 """benchmark the time to parse the on-disk markers for a repo
2333
2388
2334 Result is the number of markers in the repo."""
2389 Result is the number of markers in the repo."""
2335 timer, fm = gettimer(ui)
2390 timer, fm = gettimer(ui)
2336 svfs = getsvfs(repo)
2391 svfs = getsvfs(repo)
2337 timer(lambda: len(obsolete.obsstore(svfs)))
2392 timer(lambda: len(obsolete.obsstore(svfs)))
2338 fm.end()
2393 fm.end()
2339
2394
2340 @command(b'perflrucachedict', formatteropts +
2395 @command(b'perflrucachedict', formatteropts +
2341 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2396 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2342 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2397 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2343 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2398 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2344 (b'', b'size', 4, b'size of cache'),
2399 (b'', b'size', 4, b'size of cache'),
2345 (b'', b'gets', 10000, b'number of key lookups'),
2400 (b'', b'gets', 10000, b'number of key lookups'),
2346 (b'', b'sets', 10000, b'number of key sets'),
2401 (b'', b'sets', 10000, b'number of key sets'),
2347 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2402 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2348 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2403 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2349 norepo=True)
2404 norepo=True)
2350 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2405 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2351 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2406 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2352 opts = _byteskwargs(opts)
2407 opts = _byteskwargs(opts)
2353
2408
2354 def doinit():
2409 def doinit():
2355 for i in _xrange(10000):
2410 for i in _xrange(10000):
2356 util.lrucachedict(size)
2411 util.lrucachedict(size)
2357
2412
2358 costrange = list(range(mincost, maxcost + 1))
2413 costrange = list(range(mincost, maxcost + 1))
2359
2414
2360 values = []
2415 values = []
2361 for i in _xrange(size):
2416 for i in _xrange(size):
2362 values.append(random.randint(0, _maxint))
2417 values.append(random.randint(0, _maxint))
2363
2418
2364 # Get mode fills the cache and tests raw lookup performance with no
2419 # Get mode fills the cache and tests raw lookup performance with no
2365 # eviction.
2420 # eviction.
2366 getseq = []
2421 getseq = []
2367 for i in _xrange(gets):
2422 for i in _xrange(gets):
2368 getseq.append(random.choice(values))
2423 getseq.append(random.choice(values))
2369
2424
2370 def dogets():
2425 def dogets():
2371 d = util.lrucachedict(size)
2426 d = util.lrucachedict(size)
2372 for v in values:
2427 for v in values:
2373 d[v] = v
2428 d[v] = v
2374 for key in getseq:
2429 for key in getseq:
2375 value = d[key]
2430 value = d[key]
2376 value # silence pyflakes warning
2431 value # silence pyflakes warning
2377
2432
2378 def dogetscost():
2433 def dogetscost():
2379 d = util.lrucachedict(size, maxcost=costlimit)
2434 d = util.lrucachedict(size, maxcost=costlimit)
2380 for i, v in enumerate(values):
2435 for i, v in enumerate(values):
2381 d.insert(v, v, cost=costs[i])
2436 d.insert(v, v, cost=costs[i])
2382 for key in getseq:
2437 for key in getseq:
2383 try:
2438 try:
2384 value = d[key]
2439 value = d[key]
2385 value # silence pyflakes warning
2440 value # silence pyflakes warning
2386 except KeyError:
2441 except KeyError:
2387 pass
2442 pass
2388
2443
2389 # Set mode tests insertion speed with cache eviction.
2444 # Set mode tests insertion speed with cache eviction.
2390 setseq = []
2445 setseq = []
2391 costs = []
2446 costs = []
2392 for i in _xrange(sets):
2447 for i in _xrange(sets):
2393 setseq.append(random.randint(0, _maxint))
2448 setseq.append(random.randint(0, _maxint))
2394 costs.append(random.choice(costrange))
2449 costs.append(random.choice(costrange))
2395
2450
2396 def doinserts():
2451 def doinserts():
2397 d = util.lrucachedict(size)
2452 d = util.lrucachedict(size)
2398 for v in setseq:
2453 for v in setseq:
2399 d.insert(v, v)
2454 d.insert(v, v)
2400
2455
2401 def doinsertscost():
2456 def doinsertscost():
2402 d = util.lrucachedict(size, maxcost=costlimit)
2457 d = util.lrucachedict(size, maxcost=costlimit)
2403 for i, v in enumerate(setseq):
2458 for i, v in enumerate(setseq):
2404 d.insert(v, v, cost=costs[i])
2459 d.insert(v, v, cost=costs[i])
2405
2460
2406 def dosets():
2461 def dosets():
2407 d = util.lrucachedict(size)
2462 d = util.lrucachedict(size)
2408 for v in setseq:
2463 for v in setseq:
2409 d[v] = v
2464 d[v] = v
2410
2465
2411 # Mixed mode randomly performs gets and sets with eviction.
2466 # Mixed mode randomly performs gets and sets with eviction.
2412 mixedops = []
2467 mixedops = []
2413 for i in _xrange(mixed):
2468 for i in _xrange(mixed):
2414 r = random.randint(0, 100)
2469 r = random.randint(0, 100)
2415 if r < mixedgetfreq:
2470 if r < mixedgetfreq:
2416 op = 0
2471 op = 0
2417 else:
2472 else:
2418 op = 1
2473 op = 1
2419
2474
2420 mixedops.append((op,
2475 mixedops.append((op,
2421 random.randint(0, size * 2),
2476 random.randint(0, size * 2),
2422 random.choice(costrange)))
2477 random.choice(costrange)))
2423
2478
2424 def domixed():
2479 def domixed():
2425 d = util.lrucachedict(size)
2480 d = util.lrucachedict(size)
2426
2481
2427 for op, v, cost in mixedops:
2482 for op, v, cost in mixedops:
2428 if op == 0:
2483 if op == 0:
2429 try:
2484 try:
2430 d[v]
2485 d[v]
2431 except KeyError:
2486 except KeyError:
2432 pass
2487 pass
2433 else:
2488 else:
2434 d[v] = v
2489 d[v] = v
2435
2490
2436 def domixedcost():
2491 def domixedcost():
2437 d = util.lrucachedict(size, maxcost=costlimit)
2492 d = util.lrucachedict(size, maxcost=costlimit)
2438
2493
2439 for op, v, cost in mixedops:
2494 for op, v, cost in mixedops:
2440 if op == 0:
2495 if op == 0:
2441 try:
2496 try:
2442 d[v]
2497 d[v]
2443 except KeyError:
2498 except KeyError:
2444 pass
2499 pass
2445 else:
2500 else:
2446 d.insert(v, v, cost=cost)
2501 d.insert(v, v, cost=cost)
2447
2502
2448 benches = [
2503 benches = [
2449 (doinit, b'init'),
2504 (doinit, b'init'),
2450 ]
2505 ]
2451
2506
2452 if costlimit:
2507 if costlimit:
2453 benches.extend([
2508 benches.extend([
2454 (dogetscost, b'gets w/ cost limit'),
2509 (dogetscost, b'gets w/ cost limit'),
2455 (doinsertscost, b'inserts w/ cost limit'),
2510 (doinsertscost, b'inserts w/ cost limit'),
2456 (domixedcost, b'mixed w/ cost limit'),
2511 (domixedcost, b'mixed w/ cost limit'),
2457 ])
2512 ])
2458 else:
2513 else:
2459 benches.extend([
2514 benches.extend([
2460 (dogets, b'gets'),
2515 (dogets, b'gets'),
2461 (doinserts, b'inserts'),
2516 (doinserts, b'inserts'),
2462 (dosets, b'sets'),
2517 (dosets, b'sets'),
2463 (domixed, b'mixed')
2518 (domixed, b'mixed')
2464 ])
2519 ])
2465
2520
2466 for fn, title in benches:
2521 for fn, title in benches:
2467 timer, fm = gettimer(ui, opts)
2522 timer, fm = gettimer(ui, opts)
2468 timer(fn, title=title)
2523 timer(fn, title=title)
2469 fm.end()
2524 fm.end()
2470
2525
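All of the modes above (gets, inserts, sets, mixed, with or without a cost limit) drive `util.lrucachedict`. Here is a minimal, hedged usage sketch using only the calls exercised above; `lrudemo` is an invented name and the sizes and costs are arbitrary.

from mercurial import util

def lrudemo():
    """Illustrative only: basic and cost-limited lrucachedict usage."""
    d = util.lrucachedict(4)            # plain size-bounded cache
    for v in range(8):
        d[v] = v                        # older entries are evicted past size 4
    dcost = util.lrucachedict(4, maxcost=100)
    for v in range(8):
        dcost.insert(v, v, cost=30)     # eviction also honours the cost limit
    try:
        return dcost[0]                 # may already have been evicted
    except KeyError:
        return None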
2471 @command(b'perfwrite', formatteropts)
2526 @command(b'perfwrite', formatteropts)
2472 def perfwrite(ui, repo, **opts):
2527 def perfwrite(ui, repo, **opts):
2473 """microbenchmark ui.write
2528 """microbenchmark ui.write
2474 """
2529 """
2475 opts = _byteskwargs(opts)
2530 opts = _byteskwargs(opts)
2476
2531
2477 timer, fm = gettimer(ui, opts)
2532 timer, fm = gettimer(ui, opts)
2478 def write():
2533 def write():
2479 for i in range(100000):
2534 for i in range(100000):
2480 ui.write((b'Testing write performance\n'))
2535 ui.write((b'Testing write performance\n'))
2481 timer(write)
2536 timer(write)
2482 fm.end()
2537 fm.end()
2483
2538
2484 def uisetup(ui):
2539 def uisetup(ui):
2485 if (util.safehasattr(cmdutil, b'openrevlog') and
2540 if (util.safehasattr(cmdutil, b'openrevlog') and
2486 not util.safehasattr(commands, b'debugrevlogopts')):
2541 not util.safehasattr(commands, b'debugrevlogopts')):
2487 # for "historical portability":
2542 # for "historical portability":
2488 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2543 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2489 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2544 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2490 # openrevlog() should cause failure, because it has been
2545 # openrevlog() should cause failure, because it has been
2491 # available since 3.5 (or 49c583ca48c4).
2546 # available since 3.5 (or 49c583ca48c4).
2492 def openrevlog(orig, repo, cmd, file_, opts):
2547 def openrevlog(orig, repo, cmd, file_, opts):
2493 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2548 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2494 raise error.Abort(b"This version doesn't support --dir option",
2549 raise error.Abort(b"This version doesn't support --dir option",
2495 hint=b"use 3.5 or later")
2550 hint=b"use 3.5 or later")
2496 return orig(repo, cmd, file_, opts)
2551 return orig(repo, cmd, file_, opts)
2497 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2552 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
@@ -1,289 +1,294 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perfstatusext=$CONTRIBDIR/perf.py
35 > perfstatusext=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help perfstatusext
41 $ hg help perfstatusext
42 perfstatusext extension - helper extension to measure performance
42 perfstatusext extension - helper extension to measure performance
43
43
44 list of commands:
44 list of commands:
45
45
46 perfaddremove
46 perfaddremove
47 (no help text available)
47 (no help text available)
48 perfancestors
48 perfancestors
49 (no help text available)
49 (no help text available)
50 perfancestorset
50 perfancestorset
51 (no help text available)
51 (no help text available)
52 perfannotate (no help text available)
52 perfannotate (no help text available)
53 perfbdiff benchmark a bdiff between revisions
53 perfbdiff benchmark a bdiff between revisions
54 perfbookmarks
54 perfbookmarks
55 benchmark parsing bookmarks from disk to memory
55 benchmark parsing bookmarks from disk to memory
56 perfbranchmap
56 perfbranchmap
57 benchmark the update of a branchmap
57 benchmark the update of a branchmap
58 perfbranchmapload
58 perfbranchmapload
59 benchmark reading the branchmap
59 benchmark reading the branchmap
60 perfbranchmapupdate
61 benchmark branchmap update from <base> revs to <target>
62 revs
60 perfbundleread
63 perfbundleread
61 Benchmark reading of bundle files.
64 Benchmark reading of bundle files.
62 perfcca (no help text available)
65 perfcca (no help text available)
63 perfchangegroupchangelog
66 perfchangegroupchangelog
64 Benchmark producing a changelog group for a changegroup.
67 Benchmark producing a changelog group for a changegroup.
65 perfchangeset
68 perfchangeset
66 (no help text available)
69 (no help text available)
67 perfctxfiles (no help text available)
70 perfctxfiles (no help text available)
68 perfdiffwd Profile diff of working directory changes
71 perfdiffwd Profile diff of working directory changes
69 perfdirfoldmap
72 perfdirfoldmap
70 (no help text available)
73 (no help text available)
71 perfdirs (no help text available)
74 perfdirs (no help text available)
72 perfdirstate (no help text available)
75 perfdirstate (no help text available)
73 perfdirstatedirs
76 perfdirstatedirs
74 (no help text available)
77 (no help text available)
75 perfdirstatefoldmap
78 perfdirstatefoldmap
76 (no help text available)
79 (no help text available)
77 perfdirstatewrite
80 perfdirstatewrite
78 (no help text available)
81 (no help text available)
79 perffncacheencode
82 perffncacheencode
80 (no help text available)
83 (no help text available)
81 perffncacheload
84 perffncacheload
82 (no help text available)
85 (no help text available)
83 perffncachewrite
86 perffncachewrite
84 (no help text available)
87 (no help text available)
85 perfheads (no help text available)
88 perfheads (no help text available)
86 perfhelper-pathcopies
89 perfhelper-pathcopies
87 find statistic about potential parameters for the
90 find statistic about potential parameters for the
88 'perftracecopies'
91 'perftracecopies'
89 perfignore benchmark operation related to computing ignore
92 perfignore benchmark operation related to computing ignore
90 perfindex (no help text available)
93 perfindex (no help text available)
91 perflinelogedits
94 perflinelogedits
92 (no help text available)
95 (no help text available)
93 perfloadmarkers
96 perfloadmarkers
94 benchmark the time to parse the on-disk markers for a repo
97 benchmark the time to parse the on-disk markers for a repo
95 perflog (no help text available)
98 perflog (no help text available)
96 perflookup (no help text available)
99 perflookup (no help text available)
97 perflrucachedict
100 perflrucachedict
98 (no help text available)
101 (no help text available)
99 perfmanifest benchmark the time to read a manifest from disk and return a
102 perfmanifest benchmark the time to read a manifest from disk and return a
100 usable
103 usable
101 perfmergecalculate
104 perfmergecalculate
102 (no help text available)
105 (no help text available)
103 perfmoonwalk benchmark walking the changelog backwards
106 perfmoonwalk benchmark walking the changelog backwards
104 perfnodelookup
107 perfnodelookup
105 (no help text available)
108 (no help text available)
106 perfparents (no help text available)
109 perfparents (no help text available)
107 perfpathcopies
110 perfpathcopies
108 benchmark the copy tracing logic
111 benchmark the copy tracing logic
109 perfphases benchmark phasesets computation
112 perfphases benchmark phasesets computation
110 perfphasesremote
113 perfphasesremote
111 benchmark time needed to analyse phases of the remote server
114 benchmark time needed to analyse phases of the remote server
112 perfrawfiles (no help text available)
115 perfrawfiles (no help text available)
113 perfrevlogchunks
116 perfrevlogchunks
114 Benchmark operations on revlog chunks.
117 Benchmark operations on revlog chunks.
115 perfrevlogindex
118 perfrevlogindex
116 Benchmark operations against a revlog index.
119 Benchmark operations against a revlog index.
117 perfrevlogrevision
120 perfrevlogrevision
118 Benchmark obtaining a revlog revision.
121 Benchmark obtaining a revlog revision.
119 perfrevlogrevisions
122 perfrevlogrevisions
120 Benchmark reading a series of revisions from a revlog.
123 Benchmark reading a series of revisions from a revlog.
121 perfrevlogwrite
124 perfrevlogwrite
122 Benchmark writing a series of revisions to a revlog.
125 Benchmark writing a series of revisions to a revlog.
123 perfrevrange (no help text available)
126 perfrevrange (no help text available)
124 perfrevset benchmark the execution time of a revset
127 perfrevset benchmark the execution time of a revset
125 perfstartup (no help text available)
128 perfstartup (no help text available)
126 perfstatus (no help text available)
129 perfstatus (no help text available)
127 perftags (no help text available)
130 perftags (no help text available)
128 perftemplating
131 perftemplating
129 test the rendering time of a given template
132 test the rendering time of a given template
130 perfunidiff benchmark a unified diff between revisions
133 perfunidiff benchmark a unified diff between revisions
131 perfvolatilesets
134 perfvolatilesets
132 benchmark the computation of various volatile sets
135 benchmark the computation of various volatile sets
133 perfwalk (no help text available)
136 perfwalk (no help text available)
134 perfwrite microbenchmark ui.write
137 perfwrite microbenchmark ui.write
135
138
136 (use 'hg help -v perfstatusext' to show built-in aliases and global options)
139 (use 'hg help -v perfstatusext' to show built-in aliases and global options)
137 $ hg perfaddremove
140 $ hg perfaddremove
138 $ hg perfancestors
141 $ hg perfancestors
139 $ hg perfancestorset 2
142 $ hg perfancestorset 2
140 $ hg perfannotate a
143 $ hg perfannotate a
141 $ hg perfbdiff -c 1
144 $ hg perfbdiff -c 1
142 $ hg perfbdiff --alldata 1
145 $ hg perfbdiff --alldata 1
143 $ hg perfunidiff -c 1
146 $ hg perfunidiff -c 1
144 $ hg perfunidiff --alldata 1
147 $ hg perfunidiff --alldata 1
145 $ hg perfbookmarks
148 $ hg perfbookmarks
146 $ hg perfbranchmap
149 $ hg perfbranchmap
147 $ hg perfbranchmapload
150 $ hg perfbranchmapload
151 $ hg perfbranchmapupdate --base "not tip" --target "tip"
152 benchmark of branchmap with 3 revisions with 1 new ones
148 $ hg perfcca
153 $ hg perfcca
149 $ hg perfchangegroupchangelog
154 $ hg perfchangegroupchangelog
150 $ hg perfchangegroupchangelog --cgversion 01
155 $ hg perfchangegroupchangelog --cgversion 01
151 $ hg perfchangeset 2
156 $ hg perfchangeset 2
152 $ hg perfctxfiles 2
157 $ hg perfctxfiles 2
153 $ hg perfdiffwd
158 $ hg perfdiffwd
154 $ hg perfdirfoldmap
159 $ hg perfdirfoldmap
155 $ hg perfdirs
160 $ hg perfdirs
156 $ hg perfdirstate
161 $ hg perfdirstate
157 $ hg perfdirstatedirs
162 $ hg perfdirstatedirs
158 $ hg perfdirstatefoldmap
163 $ hg perfdirstatefoldmap
159 $ hg perfdirstatewrite
164 $ hg perfdirstatewrite
160 #if repofncache
165 #if repofncache
161 $ hg perffncacheencode
166 $ hg perffncacheencode
162 $ hg perffncacheload
167 $ hg perffncacheload
163 $ hg debugrebuildfncache
168 $ hg debugrebuildfncache
164 fncache already up to date
169 fncache already up to date
165 $ hg perffncachewrite
170 $ hg perffncachewrite
166 $ hg debugrebuildfncache
171 $ hg debugrebuildfncache
167 fncache already up to date
172 fncache already up to date
168 #endif
173 #endif
169 $ hg perfheads
174 $ hg perfheads
170 $ hg perfignore
175 $ hg perfignore
171 $ hg perfindex
176 $ hg perfindex
172 $ hg perflinelogedits -n 1
177 $ hg perflinelogedits -n 1
173 $ hg perfloadmarkers
178 $ hg perfloadmarkers
174 $ hg perflog
179 $ hg perflog
175 $ hg perflookup 2
180 $ hg perflookup 2
176 $ hg perflrucache
181 $ hg perflrucache
177 $ hg perfmanifest 2
182 $ hg perfmanifest 2
178 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
183 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
179 $ hg perfmanifest -m 44fe2c8352bb
184 $ hg perfmanifest -m 44fe2c8352bb
180 abort: manifest revision must be integer or full node
185 abort: manifest revision must be integer or full node
181 [255]
186 [255]
182 $ hg perfmergecalculate -r 3
187 $ hg perfmergecalculate -r 3
183 $ hg perfmoonwalk
188 $ hg perfmoonwalk
184 $ hg perfnodelookup 2
189 $ hg perfnodelookup 2
185 $ hg perfpathcopies 1 2
190 $ hg perfpathcopies 1 2
186 $ hg perfrawfiles 2
191 $ hg perfrawfiles 2
187 $ hg perfrevlogindex -c
192 $ hg perfrevlogindex -c
188 #if reporevlogstore
193 #if reporevlogstore
189 $ hg perfrevlogrevisions .hg/store/data/a.i
194 $ hg perfrevlogrevisions .hg/store/data/a.i
190 #endif
195 #endif
191 $ hg perfrevlogrevision -m 0
196 $ hg perfrevlogrevision -m 0
192 $ hg perfrevlogchunks -c
197 $ hg perfrevlogchunks -c
193 $ hg perfrevrange
198 $ hg perfrevrange
194 $ hg perfrevset 'all()'
199 $ hg perfrevset 'all()'
195 $ hg perfstartup
200 $ hg perfstartup
196 $ hg perfstatus
201 $ hg perfstatus
197 $ hg perftags
202 $ hg perftags
198 $ hg perftemplating
203 $ hg perftemplating
199 $ hg perfvolatilesets
204 $ hg perfvolatilesets
200 $ hg perfwalk
205 $ hg perfwalk
201 $ hg perfparents
206 $ hg perfparents
202
207
203 test actual output
208 test actual output
204 ------------------
209 ------------------
205
210
206 normal output:
211 normal output:
207
212
208 $ hg perfheads --config perf.stub=no
213 $ hg perfheads --config perf.stub=no
209 ! wall * comb * user * sys * (best of *) (glob)
214 ! wall * comb * user * sys * (best of *) (glob)
210
215
211 detailed output:
216 detailed output:
212
217
213 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
218 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
214 ! wall * comb * user * sys * (best of *) (glob)
219 ! wall * comb * user * sys * (best of *) (glob)
215 ! wall * comb * user * sys * (max of *) (glob)
220 ! wall * comb * user * sys * (max of *) (glob)
216 ! wall * comb * user * sys * (avg of *) (glob)
221 ! wall * comb * user * sys * (avg of *) (glob)
217 ! wall * comb * user * sys * (median of *) (glob)
222 ! wall * comb * user * sys * (median of *) (glob)
218
223
219 test json output
224 test json output
220 ----------------
225 ----------------
221
226
222 normal output:
227 normal output:
223
228
224 $ hg perfheads --template json --config perf.stub=no
229 $ hg perfheads --template json --config perf.stub=no
225 [
230 [
226 {
231 {
227 "comb": *, (glob)
232 "comb": *, (glob)
228 "count": *, (glob)
233 "count": *, (glob)
229 "sys": *, (glob)
234 "sys": *, (glob)
230 "user": *, (glob)
235 "user": *, (glob)
231 "wall": * (glob)
236 "wall": * (glob)
232 }
237 }
233 ]
238 ]
234
239
235 detailed output:
240 detailed output:
236
241
237 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
242 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
238 [
243 [
239 {
244 {
240 "avg.comb": *, (glob)
245 "avg.comb": *, (glob)
241 "avg.count": *, (glob)
246 "avg.count": *, (glob)
242 "avg.sys": *, (glob)
247 "avg.sys": *, (glob)
243 "avg.user": *, (glob)
248 "avg.user": *, (glob)
244 "avg.wall": *, (glob)
249 "avg.wall": *, (glob)
245 "comb": *, (glob)
250 "comb": *, (glob)
246 "count": *, (glob)
251 "count": *, (glob)
247 "max.comb": *, (glob)
252 "max.comb": *, (glob)
248 "max.count": *, (glob)
253 "max.count": *, (glob)
249 "max.sys": *, (glob)
254 "max.sys": *, (glob)
250 "max.user": *, (glob)
255 "max.user": *, (glob)
251 "max.wall": *, (glob)
256 "max.wall": *, (glob)
252 "median.comb": *, (glob)
257 "median.comb": *, (glob)
253 "median.count": *, (glob)
258 "median.count": *, (glob)
254 "median.sys": *, (glob)
259 "median.sys": *, (glob)
255 "median.user": *, (glob)
260 "median.user": *, (glob)
256 "median.wall": *, (glob)
261 "median.wall": *, (glob)
257 "sys": *, (glob)
262 "sys": *, (glob)
258 "user": *, (glob)
263 "user": *, (glob)
259 "wall": * (glob)
264 "wall": * (glob)
260 }
265 }
261 ]
266 ]
262
267
263 Check perf.py for historical portability
268 Check perf.py for historical portability
264 ----------------------------------------
269 ----------------------------------------
265
270
266 $ cd "$TESTDIR/.."
271 $ cd "$TESTDIR/.."
267
272
268 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
273 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
269 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
274 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
270 > "$TESTDIR"/check-perf-code.py contrib/perf.py
275 > "$TESTDIR"/check-perf-code.py contrib/perf.py
271 contrib/perf.py:\d+: (re)
276 contrib/perf.py:\d+: (re)
272 > from mercurial import (
277 > from mercurial import (
273 import newer module separately in try clause for early Mercurial
278 import newer module separately in try clause for early Mercurial
274 contrib/perf.py:\d+: (re)
279 contrib/perf.py:\d+: (re)
275 > from mercurial import (
280 > from mercurial import (
276 import newer module separately in try clause for early Mercurial
281 import newer module separately in try clause for early Mercurial
277 contrib/perf.py:\d+: (re)
282 contrib/perf.py:\d+: (re)
278 > origindexpath = orig.opener.join(orig.indexfile)
283 > origindexpath = orig.opener.join(orig.indexfile)
279 use getvfs()/getsvfs() for early Mercurial
284 use getvfs()/getsvfs() for early Mercurial
280 contrib/perf.py:\d+: (re)
285 contrib/perf.py:\d+: (re)
281 > origdatapath = orig.opener.join(orig.datafile)
286 > origdatapath = orig.opener.join(orig.datafile)
282 use getvfs()/getsvfs() for early Mercurial
287 use getvfs()/getsvfs() for early Mercurial
283 contrib/perf.py:\d+: (re)
288 contrib/perf.py:\d+: (re)
284 > vfs = vfsmod.vfs(tmpdir)
289 > vfs = vfsmod.vfs(tmpdir)
285 use getvfs()/getsvfs() for early Mercurial
290 use getvfs()/getsvfs() for early Mercurial
286 contrib/perf.py:\d+: (re)
291 contrib/perf.py:\d+: (re)
287 > vfs.options = getattr(orig.opener, 'options', None)
292 > vfs.options = getattr(orig.opener, 'options', None)
288 use getvfs()/getsvfs() for early Mercurial
293 use getvfs()/getsvfs() for early Mercurial
289 [1]
294 [1]