perf: make `clearfilecache` helper work with any object...
Boris Feld
r40719:d7936a9d default
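
The new helper no longer assumes it is handed a repository: it calls unfiltered() only when the object provides it, so any object that follows Mercurial's file cache pattern (a _filecache dict plus the cached attribute stored on the instance) can be cleared. Below is a minimal, self-contained sketch of that behaviour; FakeFileCached and its attributes are hypothetical stand-ins used for illustration, not Mercurial APIs.

    # Sketch only: FakeFileCached mimics the repo-style file cache pattern
    # (a `_filecache` dict plus the cached value in the instance __dict__).

    def clearfilecache(obj, attrname):
        # mirrors the new helper below: unfilter only when the object supports it
        unfiltered = getattr(obj, 'unfiltered', None)
        if unfiltered is not None:
            obj = obj.unfiltered()
        if attrname in vars(obj):
            delattr(obj, attrname)
        obj._filecache.pop(attrname, None)

    class FakeFileCached(object):
        """Hypothetical object using the file cache pattern, without unfiltered()."""
        def __init__(self):
            self._filecache = {'_bookmarks': object()}  # cache bookkeeping entry
            self._bookmarks = ['marker']                # the cached value itself

    obj = FakeFileCached()
    clearfilecache(obj, '_bookmarks')   # previously this required a repo object
    assert '_bookmarks' not in vars(obj)
    assert '_bookmarks' not in obj._filecache

Existing callers inside perf.py, such as clearfilecache(repo, b'_bookmarks') in perfbookmarks, keep working unchanged, since a repository still exposes unfiltered().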
@@ -1,2370 +1,2372 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance'''
2 '''helper extension to measure performance'''
3
3
4 # "historical portability" policy of perf.py:
4 # "historical portability" policy of perf.py:
5 #
5 #
6 # We have to do:
6 # We have to do:
7 # - make perf.py "loadable" with as wide Mercurial version as possible
7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 # This doesn't mean that perf commands work correctly with that Mercurial.
8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 # - make historical perf command work correctly with as wide Mercurial
10 # - make historical perf command work correctly with as wide Mercurial
11 # version as possible
11 # version as possible
12 #
12 #
13 # We have to do, if possible with reasonable cost:
13 # We have to do, if possible with reasonable cost:
14 # - make recent perf command for historical feature work correctly
14 # - make recent perf command for historical feature work correctly
15 # with early Mercurial
15 # with early Mercurial
16 #
16 #
17 # We don't have to do:
17 # We don't have to do:
18 # - make perf command for recent feature work correctly with early
18 # - make perf command for recent feature work correctly with early
19 # Mercurial
19 # Mercurial
20
20
21 from __future__ import absolute_import
21 from __future__ import absolute_import
22 import contextlib
22 import contextlib
23 import functools
23 import functools
24 import gc
24 import gc
25 import os
25 import os
26 import random
26 import random
27 import shutil
27 import shutil
28 import struct
28 import struct
29 import sys
29 import sys
30 import tempfile
30 import tempfile
31 import threading
31 import threading
32 import time
32 import time
33 from mercurial import (
33 from mercurial import (
34 changegroup,
34 changegroup,
35 cmdutil,
35 cmdutil,
36 commands,
36 commands,
37 copies,
37 copies,
38 error,
38 error,
39 extensions,
39 extensions,
40 mdiff,
40 mdiff,
41 merge,
41 merge,
42 revlog,
42 revlog,
43 util,
43 util,
44 )
44 )
45
45
46 # for "historical portability":
46 # for "historical portability":
47 # try to import modules separately (in dict order), and ignore
47 # try to import modules separately (in dict order), and ignore
48 # failure, because these aren't available with early Mercurial
48 # failure, because these aren't available with early Mercurial
49 try:
49 try:
50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 except ImportError:
51 except ImportError:
52 pass
52 pass
53 try:
53 try:
54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 except ImportError:
55 except ImportError:
56 pass
56 pass
57 try:
57 try:
58 from mercurial import registrar # since 3.7 (or 37d50250b696)
58 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 dir(registrar) # forcibly load it
59 dir(registrar) # forcibly load it
60 except ImportError:
60 except ImportError:
61 registrar = None
61 registrar = None
62 try:
62 try:
63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 except ImportError:
64 except ImportError:
65 pass
65 pass
66 try:
66 try:
67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 except ImportError:
68 except ImportError:
69 pass
69 pass
70
70
71 def identity(a):
71 def identity(a):
72 return a
72 return a
73
73
74 try:
74 try:
75 from mercurial import pycompat
75 from mercurial import pycompat
76 getargspec = pycompat.getargspec # added to module after 4.5
76 getargspec = pycompat.getargspec # added to module after 4.5
77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
81 if pycompat.ispy3:
81 if pycompat.ispy3:
82 _maxint = sys.maxsize # per py3 docs for replacing maxint
82 _maxint = sys.maxsize # per py3 docs for replacing maxint
83 else:
83 else:
84 _maxint = sys.maxint
84 _maxint = sys.maxint
85 except (ImportError, AttributeError):
85 except (ImportError, AttributeError):
86 import inspect
86 import inspect
87 getargspec = inspect.getargspec
87 getargspec = inspect.getargspec
88 _byteskwargs = identity
88 _byteskwargs = identity
89 fsencode = identity # no py3 support
89 fsencode = identity # no py3 support
90 _maxint = sys.maxint # no py3 support
90 _maxint = sys.maxint # no py3 support
91 _sysstr = lambda x: x # no py3 support
91 _sysstr = lambda x: x # no py3 support
92 _xrange = xrange
92 _xrange = xrange
93
93
94 try:
94 try:
95 # 4.7+
95 # 4.7+
96 queue = pycompat.queue.Queue
96 queue = pycompat.queue.Queue
97 except (AttributeError, ImportError):
97 except (AttributeError, ImportError):
98 # <4.7.
98 # <4.7.
99 try:
99 try:
100 queue = pycompat.queue
100 queue = pycompat.queue
101 except (AttributeError, ImportError):
101 except (AttributeError, ImportError):
102 queue = util.queue
102 queue = util.queue
103
103
104 try:
104 try:
105 from mercurial import logcmdutil
105 from mercurial import logcmdutil
106 makelogtemplater = logcmdutil.maketemplater
106 makelogtemplater = logcmdutil.maketemplater
107 except (AttributeError, ImportError):
107 except (AttributeError, ImportError):
108 try:
108 try:
109 makelogtemplater = cmdutil.makelogtemplater
109 makelogtemplater = cmdutil.makelogtemplater
110 except (AttributeError, ImportError):
110 except (AttributeError, ImportError):
111 makelogtemplater = None
111 makelogtemplater = None
112
112
113 # for "historical portability":
113 # for "historical portability":
114 # define util.safehasattr forcibly, because util.safehasattr has been
114 # define util.safehasattr forcibly, because util.safehasattr has been
115 # available since 1.9.3 (or 94b200a11cf7)
115 # available since 1.9.3 (or 94b200a11cf7)
116 _undefined = object()
116 _undefined = object()
117 def safehasattr(thing, attr):
117 def safehasattr(thing, attr):
118 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
118 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
119 setattr(util, 'safehasattr', safehasattr)
119 setattr(util, 'safehasattr', safehasattr)
120
120
121 # for "historical portability":
121 # for "historical portability":
122 # define util.timer forcibly, because util.timer has been available
122 # define util.timer forcibly, because util.timer has been available
123 # since ae5d60bb70c9
123 # since ae5d60bb70c9
124 if safehasattr(time, 'perf_counter'):
124 if safehasattr(time, 'perf_counter'):
125 util.timer = time.perf_counter
125 util.timer = time.perf_counter
126 elif os.name == b'nt':
126 elif os.name == b'nt':
127 util.timer = time.clock
127 util.timer = time.clock
128 else:
128 else:
129 util.timer = time.time
129 util.timer = time.time
130
130
131 # for "historical portability":
131 # for "historical portability":
132 # use locally defined empty option list, if formatteropts isn't
132 # use locally defined empty option list, if formatteropts isn't
133 # available, because commands.formatteropts has been available since
133 # available, because commands.formatteropts has been available since
134 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
134 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
135 # available since 2.2 (or ae5f92e154d3)
135 # available since 2.2 (or ae5f92e154d3)
136 formatteropts = getattr(cmdutil, "formatteropts",
136 formatteropts = getattr(cmdutil, "formatteropts",
137 getattr(commands, "formatteropts", []))
137 getattr(commands, "formatteropts", []))
138
138
139 # for "historical portability":
139 # for "historical portability":
140 # use locally defined option list, if debugrevlogopts isn't available,
140 # use locally defined option list, if debugrevlogopts isn't available,
141 # because commands.debugrevlogopts has been available since 3.7 (or
141 # because commands.debugrevlogopts has been available since 3.7 (or
142 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
142 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
143 # since 1.9 (or a79fea6b3e77).
143 # since 1.9 (or a79fea6b3e77).
144 revlogopts = getattr(cmdutil, "debugrevlogopts",
144 revlogopts = getattr(cmdutil, "debugrevlogopts",
145 getattr(commands, "debugrevlogopts", [
145 getattr(commands, "debugrevlogopts", [
146 (b'c', b'changelog', False, (b'open changelog')),
146 (b'c', b'changelog', False, (b'open changelog')),
147 (b'm', b'manifest', False, (b'open manifest')),
147 (b'm', b'manifest', False, (b'open manifest')),
148 (b'', b'dir', False, (b'open directory manifest')),
148 (b'', b'dir', False, (b'open directory manifest')),
149 ]))
149 ]))
150
150
151 cmdtable = {}
151 cmdtable = {}
152
152
153 # for "historical portability":
153 # for "historical portability":
154 # define parsealiases locally, because cmdutil.parsealiases has been
154 # define parsealiases locally, because cmdutil.parsealiases has been
155 # available since 1.5 (or 6252852b4332)
155 # available since 1.5 (or 6252852b4332)
156 def parsealiases(cmd):
156 def parsealiases(cmd):
157 return cmd.split(b"|")
157 return cmd.split(b"|")
158
158
159 if safehasattr(registrar, 'command'):
159 if safehasattr(registrar, 'command'):
160 command = registrar.command(cmdtable)
160 command = registrar.command(cmdtable)
161 elif safehasattr(cmdutil, 'command'):
161 elif safehasattr(cmdutil, 'command'):
162 command = cmdutil.command(cmdtable)
162 command = cmdutil.command(cmdtable)
163 if b'norepo' not in getargspec(command).args:
163 if b'norepo' not in getargspec(command).args:
164 # for "historical portability":
164 # for "historical portability":
165 # wrap original cmdutil.command, because "norepo" option has
165 # wrap original cmdutil.command, because "norepo" option has
166 # been available since 3.1 (or 75a96326cecb)
166 # been available since 3.1 (or 75a96326cecb)
167 _command = command
167 _command = command
168 def command(name, options=(), synopsis=None, norepo=False):
168 def command(name, options=(), synopsis=None, norepo=False):
169 if norepo:
169 if norepo:
170 commands.norepo += b' %s' % b' '.join(parsealiases(name))
170 commands.norepo += b' %s' % b' '.join(parsealiases(name))
171 return _command(name, list(options), synopsis)
171 return _command(name, list(options), synopsis)
172 else:
172 else:
173 # for "historical portability":
173 # for "historical portability":
174 # define "@command" annotation locally, because cmdutil.command
174 # define "@command" annotation locally, because cmdutil.command
175 # has been available since 1.9 (or 2daa5179e73f)
175 # has been available since 1.9 (or 2daa5179e73f)
176 def command(name, options=(), synopsis=None, norepo=False):
176 def command(name, options=(), synopsis=None, norepo=False):
177 def decorator(func):
177 def decorator(func):
178 if synopsis:
178 if synopsis:
179 cmdtable[name] = func, list(options), synopsis
179 cmdtable[name] = func, list(options), synopsis
180 else:
180 else:
181 cmdtable[name] = func, list(options)
181 cmdtable[name] = func, list(options)
182 if norepo:
182 if norepo:
183 commands.norepo += b' %s' % b' '.join(parsealiases(name))
183 commands.norepo += b' %s' % b' '.join(parsealiases(name))
184 return func
184 return func
185 return decorator
185 return decorator
186
186
187 try:
187 try:
188 import mercurial.registrar
188 import mercurial.registrar
189 import mercurial.configitems
189 import mercurial.configitems
190 configtable = {}
190 configtable = {}
191 configitem = mercurial.registrar.configitem(configtable)
191 configitem = mercurial.registrar.configitem(configtable)
192 configitem(b'perf', b'presleep',
192 configitem(b'perf', b'presleep',
193 default=mercurial.configitems.dynamicdefault,
193 default=mercurial.configitems.dynamicdefault,
194 )
194 )
195 configitem(b'perf', b'stub',
195 configitem(b'perf', b'stub',
196 default=mercurial.configitems.dynamicdefault,
196 default=mercurial.configitems.dynamicdefault,
197 )
197 )
198 configitem(b'perf', b'parentscount',
198 configitem(b'perf', b'parentscount',
199 default=mercurial.configitems.dynamicdefault,
199 default=mercurial.configitems.dynamicdefault,
200 )
200 )
201 configitem(b'perf', b'all-timing',
201 configitem(b'perf', b'all-timing',
202 default=mercurial.configitems.dynamicdefault,
202 default=mercurial.configitems.dynamicdefault,
203 )
203 )
204 except (ImportError, AttributeError):
204 except (ImportError, AttributeError):
205 pass
205 pass
206
206
207 def getlen(ui):
207 def getlen(ui):
208 if ui.configbool(b"perf", b"stub", False):
208 if ui.configbool(b"perf", b"stub", False):
209 return lambda x: 1
209 return lambda x: 1
210 return len
210 return len
211
211
212 def gettimer(ui, opts=None):
212 def gettimer(ui, opts=None):
213 """return a timer function and formatter: (timer, formatter)
213 """return a timer function and formatter: (timer, formatter)
214
214
215 This function exists to gather the creation of formatter in a single
215 This function exists to gather the creation of formatter in a single
216 place instead of duplicating it in all performance commands."""
216 place instead of duplicating it in all performance commands."""
217
217
218 # enforce an idle period before execution to counteract power management
218 # enforce an idle period before execution to counteract power management
219 # experimental config: perf.presleep
219 # experimental config: perf.presleep
220 time.sleep(getint(ui, b"perf", b"presleep", 1))
220 time.sleep(getint(ui, b"perf", b"presleep", 1))
221
221
222 if opts is None:
222 if opts is None:
223 opts = {}
223 opts = {}
224 # redirect all to stderr unless buffer api is in use
224 # redirect all to stderr unless buffer api is in use
225 if not ui._buffers:
225 if not ui._buffers:
226 ui = ui.copy()
226 ui = ui.copy()
227 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
227 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
228 if uifout:
228 if uifout:
229 # for "historical portability":
229 # for "historical portability":
230 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
230 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
231 uifout.set(ui.ferr)
231 uifout.set(ui.ferr)
232
232
233 # get a formatter
233 # get a formatter
234 uiformatter = getattr(ui, 'formatter', None)
234 uiformatter = getattr(ui, 'formatter', None)
235 if uiformatter:
235 if uiformatter:
236 fm = uiformatter(b'perf', opts)
236 fm = uiformatter(b'perf', opts)
237 else:
237 else:
238 # for "historical portability":
238 # for "historical portability":
239 # define formatter locally, because ui.formatter has been
239 # define formatter locally, because ui.formatter has been
240 # available since 2.2 (or ae5f92e154d3)
240 # available since 2.2 (or ae5f92e154d3)
241 from mercurial import node
241 from mercurial import node
242 class defaultformatter(object):
242 class defaultformatter(object):
243 """Minimized composition of baseformatter and plainformatter
243 """Minimized composition of baseformatter and plainformatter
244 """
244 """
245 def __init__(self, ui, topic, opts):
245 def __init__(self, ui, topic, opts):
246 self._ui = ui
246 self._ui = ui
247 if ui.debugflag:
247 if ui.debugflag:
248 self.hexfunc = node.hex
248 self.hexfunc = node.hex
249 else:
249 else:
250 self.hexfunc = node.short
250 self.hexfunc = node.short
251 def __nonzero__(self):
251 def __nonzero__(self):
252 return False
252 return False
253 __bool__ = __nonzero__
253 __bool__ = __nonzero__
254 def startitem(self):
254 def startitem(self):
255 pass
255 pass
256 def data(self, **data):
256 def data(self, **data):
257 pass
257 pass
258 def write(self, fields, deftext, *fielddata, **opts):
258 def write(self, fields, deftext, *fielddata, **opts):
259 self._ui.write(deftext % fielddata, **opts)
259 self._ui.write(deftext % fielddata, **opts)
260 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
260 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
261 if cond:
261 if cond:
262 self._ui.write(deftext % fielddata, **opts)
262 self._ui.write(deftext % fielddata, **opts)
263 def plain(self, text, **opts):
263 def plain(self, text, **opts):
264 self._ui.write(text, **opts)
264 self._ui.write(text, **opts)
265 def end(self):
265 def end(self):
266 pass
266 pass
267 fm = defaultformatter(ui, b'perf', opts)
267 fm = defaultformatter(ui, b'perf', opts)
268
268
269 # stub function, runs code only once instead of in a loop
269 # stub function, runs code only once instead of in a loop
270 # experimental config: perf.stub
270 # experimental config: perf.stub
271 if ui.configbool(b"perf", b"stub", False):
271 if ui.configbool(b"perf", b"stub", False):
272 return functools.partial(stub_timer, fm), fm
272 return functools.partial(stub_timer, fm), fm
273
273
274 # experimental config: perf.all-timing
274 # experimental config: perf.all-timing
275 displayall = ui.configbool(b"perf", b"all-timing", False)
275 displayall = ui.configbool(b"perf", b"all-timing", False)
276 return functools.partial(_timer, fm, displayall=displayall), fm
276 return functools.partial(_timer, fm, displayall=displayall), fm
277
277
278 def stub_timer(fm, func, setup=None, title=None):
278 def stub_timer(fm, func, setup=None, title=None):
279 func()
279 func()
280
280
281 @contextlib.contextmanager
281 @contextlib.contextmanager
282 def timeone():
282 def timeone():
283 r = []
283 r = []
284 ostart = os.times()
284 ostart = os.times()
285 cstart = util.timer()
285 cstart = util.timer()
286 yield r
286 yield r
287 cstop = util.timer()
287 cstop = util.timer()
288 ostop = os.times()
288 ostop = os.times()
289 a, b = ostart, ostop
289 a, b = ostart, ostop
290 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
290 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
291
291
292 def _timer(fm, func, setup=None, title=None, displayall=False):
292 def _timer(fm, func, setup=None, title=None, displayall=False):
293 gc.collect()
293 gc.collect()
294 results = []
294 results = []
295 begin = util.timer()
295 begin = util.timer()
296 count = 0
296 count = 0
297 while True:
297 while True:
298 if setup is not None:
298 if setup is not None:
299 setup()
299 setup()
300 with timeone() as item:
300 with timeone() as item:
301 r = func()
301 r = func()
302 count += 1
302 count += 1
303 results.append(item[0])
303 results.append(item[0])
304 cstop = util.timer()
304 cstop = util.timer()
305 if cstop - begin > 3 and count >= 100:
305 if cstop - begin > 3 and count >= 100:
306 break
306 break
307 if cstop - begin > 10 and count >= 3:
307 if cstop - begin > 10 and count >= 3:
308 break
308 break
309
309
310 formatone(fm, results, title=title, result=r,
310 formatone(fm, results, title=title, result=r,
311 displayall=displayall)
311 displayall=displayall)
312
312
313 def formatone(fm, timings, title=None, result=None, displayall=False):
313 def formatone(fm, timings, title=None, result=None, displayall=False):
314
314
315 count = len(timings)
315 count = len(timings)
316
316
317 fm.startitem()
317 fm.startitem()
318
318
319 if title:
319 if title:
320 fm.write(b'title', b'! %s\n', title)
320 fm.write(b'title', b'! %s\n', title)
321 if result:
321 if result:
322 fm.write(b'result', b'! result: %s\n', result)
322 fm.write(b'result', b'! result: %s\n', result)
323 def display(role, entry):
323 def display(role, entry):
324 prefix = b''
324 prefix = b''
325 if role != b'best':
325 if role != b'best':
326 prefix = b'%s.' % role
326 prefix = b'%s.' % role
327 fm.plain(b'!')
327 fm.plain(b'!')
328 fm.write(prefix + b'wall', b' wall %f', entry[0])
328 fm.write(prefix + b'wall', b' wall %f', entry[0])
329 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
329 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
330 fm.write(prefix + b'user', b' user %f', entry[1])
330 fm.write(prefix + b'user', b' user %f', entry[1])
331 fm.write(prefix + b'sys', b' sys %f', entry[2])
331 fm.write(prefix + b'sys', b' sys %f', entry[2])
332 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
332 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
333 fm.plain(b'\n')
333 fm.plain(b'\n')
334 timings.sort()
334 timings.sort()
335 min_val = timings[0]
335 min_val = timings[0]
336 display(b'best', min_val)
336 display(b'best', min_val)
337 if displayall:
337 if displayall:
338 max_val = timings[-1]
338 max_val = timings[-1]
339 display(b'max', max_val)
339 display(b'max', max_val)
340 avg = tuple([sum(x) / count for x in zip(*timings)])
340 avg = tuple([sum(x) / count for x in zip(*timings)])
341 display(b'avg', avg)
341 display(b'avg', avg)
342 median = timings[len(timings) // 2]
342 median = timings[len(timings) // 2]
343 display(b'median', median)
343 display(b'median', median)
344
344
345 # utilities for historical portability
345 # utilities for historical portability
346
346
347 def getint(ui, section, name, default):
347 def getint(ui, section, name, default):
348 # for "historical portability":
348 # for "historical portability":
349 # ui.configint has been available since 1.9 (or fa2b596db182)
349 # ui.configint has been available since 1.9 (or fa2b596db182)
350 v = ui.config(section, name, None)
350 v = ui.config(section, name, None)
351 if v is None:
351 if v is None:
352 return default
352 return default
353 try:
353 try:
354 return int(v)
354 return int(v)
355 except ValueError:
355 except ValueError:
356 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
356 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
357 % (section, name, v))
357 % (section, name, v))
358
358
359 def safeattrsetter(obj, name, ignoremissing=False):
359 def safeattrsetter(obj, name, ignoremissing=False):
360 """Ensure that 'obj' has 'name' attribute before subsequent setattr
360 """Ensure that 'obj' has 'name' attribute before subsequent setattr
361
361
362 This function is aborted, if 'obj' doesn't have 'name' attribute
362 This function is aborted, if 'obj' doesn't have 'name' attribute
363 at runtime. This avoids overlooking removal of an attribute, which
363 at runtime. This avoids overlooking removal of an attribute, which
364 breaks assumption of performance measurement, in the future.
364 breaks assumption of performance measurement, in the future.
365
365
366 This function returns the object to (1) assign a new value, and
366 This function returns the object to (1) assign a new value, and
367 (2) restore an original value to the attribute.
367 (2) restore an original value to the attribute.
368
368
369 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
369 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
370 abortion, and this function returns None. This is useful to
370 abortion, and this function returns None. This is useful to
371 examine an attribute, which isn't ensured in all Mercurial
371 examine an attribute, which isn't ensured in all Mercurial
372 versions.
372 versions.
373 """
373 """
374 if not util.safehasattr(obj, name):
374 if not util.safehasattr(obj, name):
375 if ignoremissing:
375 if ignoremissing:
376 return None
376 return None
377 raise error.Abort((b"missing attribute %s of %s might break assumption"
377 raise error.Abort((b"missing attribute %s of %s might break assumption"
378 b" of performance measurement") % (name, obj))
378 b" of performance measurement") % (name, obj))
379
379
380 origvalue = getattr(obj, _sysstr(name))
380 origvalue = getattr(obj, _sysstr(name))
381 class attrutil(object):
381 class attrutil(object):
382 def set(self, newvalue):
382 def set(self, newvalue):
383 setattr(obj, _sysstr(name), newvalue)
383 setattr(obj, _sysstr(name), newvalue)
384 def restore(self):
384 def restore(self):
385 setattr(obj, _sysstr(name), origvalue)
385 setattr(obj, _sysstr(name), origvalue)
386
386
387 return attrutil()
387 return attrutil()
388
388
389 # utilities to examine each internal API changes
389 # utilities to examine each internal API changes
390
390
391 def getbranchmapsubsettable():
391 def getbranchmapsubsettable():
392 # for "historical portability":
392 # for "historical portability":
393 # subsettable is defined in:
393 # subsettable is defined in:
394 # - branchmap since 2.9 (or 175c6fd8cacc)
394 # - branchmap since 2.9 (or 175c6fd8cacc)
395 # - repoview since 2.5 (or 59a9f18d4587)
395 # - repoview since 2.5 (or 59a9f18d4587)
396 for mod in (branchmap, repoview):
396 for mod in (branchmap, repoview):
397 subsettable = getattr(mod, 'subsettable', None)
397 subsettable = getattr(mod, 'subsettable', None)
398 if subsettable:
398 if subsettable:
399 return subsettable
399 return subsettable
400
400
401 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
401 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
402 # branchmap and repoview modules exist, but subsettable attribute
402 # branchmap and repoview modules exist, but subsettable attribute
403 # doesn't)
403 # doesn't)
404 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
404 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
405 hint=b"use 2.5 or later")
405 hint=b"use 2.5 or later")
406
406
407 def getsvfs(repo):
407 def getsvfs(repo):
408 """Return appropriate object to access files under .hg/store
408 """Return appropriate object to access files under .hg/store
409 """
409 """
410 # for "historical portability":
410 # for "historical portability":
411 # repo.svfs has been available since 2.3 (or 7034365089bf)
411 # repo.svfs has been available since 2.3 (or 7034365089bf)
412 svfs = getattr(repo, 'svfs', None)
412 svfs = getattr(repo, 'svfs', None)
413 if svfs:
413 if svfs:
414 return svfs
414 return svfs
415 else:
415 else:
416 return getattr(repo, 'sopener')
416 return getattr(repo, 'sopener')
417
417
418 def getvfs(repo):
418 def getvfs(repo):
419 """Return appropriate object to access files under .hg
419 """Return appropriate object to access files under .hg
420 """
420 """
421 # for "historical portability":
421 # for "historical portability":
422 # repo.vfs has been available since 2.3 (or 7034365089bf)
422 # repo.vfs has been available since 2.3 (or 7034365089bf)
423 vfs = getattr(repo, 'vfs', None)
423 vfs = getattr(repo, 'vfs', None)
424 if vfs:
424 if vfs:
425 return vfs
425 return vfs
426 else:
426 else:
427 return getattr(repo, 'opener')
427 return getattr(repo, 'opener')
428
428
429 def repocleartagscachefunc(repo):
429 def repocleartagscachefunc(repo):
430 """Return the function to clear tags cache according to repo internal API
430 """Return the function to clear tags cache according to repo internal API
431 """
431 """
432 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
432 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
433 # in this case, setattr(repo, '_tagscache', None) or so isn't
433 # in this case, setattr(repo, '_tagscache', None) or so isn't
434 # correct way to clear tags cache, because existing code paths
434 # correct way to clear tags cache, because existing code paths
435 # expect _tagscache to be a structured object.
435 # expect _tagscache to be a structured object.
436 def clearcache():
436 def clearcache():
437 # _tagscache has been filteredpropertycache since 2.5 (or
437 # _tagscache has been filteredpropertycache since 2.5 (or
438 # 98c867ac1330), and delattr() can't work in such case
438 # 98c867ac1330), and delattr() can't work in such case
439 if b'_tagscache' in vars(repo):
439 if b'_tagscache' in vars(repo):
440 del repo.__dict__[b'_tagscache']
440 del repo.__dict__[b'_tagscache']
441 return clearcache
441 return clearcache
442
442
443 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
443 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
444 if repotags: # since 1.4 (or 5614a628d173)
444 if repotags: # since 1.4 (or 5614a628d173)
445 return lambda : repotags.set(None)
445 return lambda : repotags.set(None)
446
446
447 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
447 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
448 if repotagscache: # since 0.6 (or d7df759d0e97)
448 if repotagscache: # since 0.6 (or d7df759d0e97)
449 return lambda : repotagscache.set(None)
449 return lambda : repotagscache.set(None)
450
450
451 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
451 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
452 # this point, but it isn't so problematic, because:
452 # this point, but it isn't so problematic, because:
453 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
453 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
454 # in perftags() causes failure soon
454 # in perftags() causes failure soon
455 # - perf.py itself has been available since 1.1 (or eb240755386d)
455 # - perf.py itself has been available since 1.1 (or eb240755386d)
456 raise error.Abort((b"tags API of this hg command is unknown"))
456 raise error.Abort((b"tags API of this hg command is unknown"))
457
457
458 # utilities to clear cache
458 # utilities to clear cache
459
459
460 - def clearfilecache(repo, attrname):
461 -     unfi = repo.unfiltered()
462 -     if attrname in vars(unfi):
463 -         delattr(unfi, attrname)
464 -     unfi._filecache.pop(attrname, None)
460 + def clearfilecache(obj, attrname):
461 +     unfiltered = getattr(obj, 'unfiltered', None)
462 +     if unfiltered is not None:
463 +         obj = obj.unfiltered()
464 +     if attrname in vars(obj):
465 +         delattr(obj, attrname)
466 +     obj._filecache.pop(attrname, None)
465
467
466 # perf commands
468 # perf commands
467
469
468 @command(b'perfwalk', formatteropts)
470 @command(b'perfwalk', formatteropts)
469 def perfwalk(ui, repo, *pats, **opts):
471 def perfwalk(ui, repo, *pats, **opts):
470 opts = _byteskwargs(opts)
472 opts = _byteskwargs(opts)
471 timer, fm = gettimer(ui, opts)
473 timer, fm = gettimer(ui, opts)
472 m = scmutil.match(repo[None], pats, {})
474 m = scmutil.match(repo[None], pats, {})
473 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
475 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
474 ignored=False))))
476 ignored=False))))
475 fm.end()
477 fm.end()
476
478
477 @command(b'perfannotate', formatteropts)
479 @command(b'perfannotate', formatteropts)
478 def perfannotate(ui, repo, f, **opts):
480 def perfannotate(ui, repo, f, **opts):
479 opts = _byteskwargs(opts)
481 opts = _byteskwargs(opts)
480 timer, fm = gettimer(ui, opts)
482 timer, fm = gettimer(ui, opts)
481 fc = repo[b'.'][f]
483 fc = repo[b'.'][f]
482 timer(lambda: len(fc.annotate(True)))
484 timer(lambda: len(fc.annotate(True)))
483 fm.end()
485 fm.end()
484
486
485 @command(b'perfstatus',
487 @command(b'perfstatus',
486 [(b'u', b'unknown', False,
488 [(b'u', b'unknown', False,
487 b'ask status to look for unknown files')] + formatteropts)
489 b'ask status to look for unknown files')] + formatteropts)
488 def perfstatus(ui, repo, **opts):
490 def perfstatus(ui, repo, **opts):
489 opts = _byteskwargs(opts)
491 opts = _byteskwargs(opts)
490 #m = match.always(repo.root, repo.getcwd())
492 #m = match.always(repo.root, repo.getcwd())
491 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
493 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
492 # False))))
494 # False))))
493 timer, fm = gettimer(ui, opts)
495 timer, fm = gettimer(ui, opts)
494 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
496 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
495 fm.end()
497 fm.end()
496
498
497 @command(b'perfaddremove', formatteropts)
499 @command(b'perfaddremove', formatteropts)
498 def perfaddremove(ui, repo, **opts):
500 def perfaddremove(ui, repo, **opts):
499 opts = _byteskwargs(opts)
501 opts = _byteskwargs(opts)
500 timer, fm = gettimer(ui, opts)
502 timer, fm = gettimer(ui, opts)
501 try:
503 try:
502 oldquiet = repo.ui.quiet
504 oldquiet = repo.ui.quiet
503 repo.ui.quiet = True
505 repo.ui.quiet = True
504 matcher = scmutil.match(repo[None])
506 matcher = scmutil.match(repo[None])
505 opts[b'dry_run'] = True
507 opts[b'dry_run'] = True
506 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
508 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
507 finally:
509 finally:
508 repo.ui.quiet = oldquiet
510 repo.ui.quiet = oldquiet
509 fm.end()
511 fm.end()
510
512
511 def clearcaches(cl):
513 def clearcaches(cl):
512 # behave somewhat consistently across internal API changes
514 # behave somewhat consistently across internal API changes
513 if util.safehasattr(cl, b'clearcaches'):
515 if util.safehasattr(cl, b'clearcaches'):
514 cl.clearcaches()
516 cl.clearcaches()
515 elif util.safehasattr(cl, b'_nodecache'):
517 elif util.safehasattr(cl, b'_nodecache'):
516 from mercurial.node import nullid, nullrev
518 from mercurial.node import nullid, nullrev
517 cl._nodecache = {nullid: nullrev}
519 cl._nodecache = {nullid: nullrev}
518 cl._nodepos = None
520 cl._nodepos = None
519
521
520 @command(b'perfheads', formatteropts)
522 @command(b'perfheads', formatteropts)
521 def perfheads(ui, repo, **opts):
523 def perfheads(ui, repo, **opts):
522 opts = _byteskwargs(opts)
524 opts = _byteskwargs(opts)
523 timer, fm = gettimer(ui, opts)
525 timer, fm = gettimer(ui, opts)
524 cl = repo.changelog
526 cl = repo.changelog
525 def d():
527 def d():
526 len(cl.headrevs())
528 len(cl.headrevs())
527 clearcaches(cl)
529 clearcaches(cl)
528 timer(d)
530 timer(d)
529 fm.end()
531 fm.end()
530
532
531 @command(b'perftags', formatteropts)
533 @command(b'perftags', formatteropts)
532 def perftags(ui, repo, **opts):
534 def perftags(ui, repo, **opts):
533 import mercurial.changelog
535 import mercurial.changelog
534 import mercurial.manifest
536 import mercurial.manifest
535
537
536 opts = _byteskwargs(opts)
538 opts = _byteskwargs(opts)
537 timer, fm = gettimer(ui, opts)
539 timer, fm = gettimer(ui, opts)
538 svfs = getsvfs(repo)
540 svfs = getsvfs(repo)
539 repocleartagscache = repocleartagscachefunc(repo)
541 repocleartagscache = repocleartagscachefunc(repo)
540 def s():
542 def s():
541 repo.changelog = mercurial.changelog.changelog(svfs)
543 repo.changelog = mercurial.changelog.changelog(svfs)
542 rootmanifest = mercurial.manifest.manifestrevlog(svfs)
544 rootmanifest = mercurial.manifest.manifestrevlog(svfs)
543 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
545 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
544 rootmanifest)
546 rootmanifest)
545 repocleartagscache()
547 repocleartagscache()
546 def t():
548 def t():
547 return len(repo.tags())
549 return len(repo.tags())
548 timer(t, setup=s)
550 timer(t, setup=s)
549 fm.end()
551 fm.end()
550
552
551 @command(b'perfancestors', formatteropts)
553 @command(b'perfancestors', formatteropts)
552 def perfancestors(ui, repo, **opts):
554 def perfancestors(ui, repo, **opts):
553 opts = _byteskwargs(opts)
555 opts = _byteskwargs(opts)
554 timer, fm = gettimer(ui, opts)
556 timer, fm = gettimer(ui, opts)
555 heads = repo.changelog.headrevs()
557 heads = repo.changelog.headrevs()
556 def d():
558 def d():
557 for a in repo.changelog.ancestors(heads):
559 for a in repo.changelog.ancestors(heads):
558 pass
560 pass
559 timer(d)
561 timer(d)
560 fm.end()
562 fm.end()
561
563
562 @command(b'perfancestorset', formatteropts)
564 @command(b'perfancestorset', formatteropts)
563 def perfancestorset(ui, repo, revset, **opts):
565 def perfancestorset(ui, repo, revset, **opts):
564 opts = _byteskwargs(opts)
566 opts = _byteskwargs(opts)
565 timer, fm = gettimer(ui, opts)
567 timer, fm = gettimer(ui, opts)
566 revs = repo.revs(revset)
568 revs = repo.revs(revset)
567 heads = repo.changelog.headrevs()
569 heads = repo.changelog.headrevs()
568 def d():
570 def d():
569 s = repo.changelog.ancestors(heads)
571 s = repo.changelog.ancestors(heads)
570 for rev in revs:
572 for rev in revs:
571 rev in s
573 rev in s
572 timer(d)
574 timer(d)
573 fm.end()
575 fm.end()
574
576
575 @command(b'perfbookmarks', formatteropts)
577 @command(b'perfbookmarks', formatteropts)
576 def perfbookmarks(ui, repo, **opts):
578 def perfbookmarks(ui, repo, **opts):
577 """benchmark parsing bookmarks from disk to memory"""
579 """benchmark parsing bookmarks from disk to memory"""
578 opts = _byteskwargs(opts)
580 opts = _byteskwargs(opts)
579 timer, fm = gettimer(ui, opts)
581 timer, fm = gettimer(ui, opts)
580
582
581 def s():
583 def s():
582 clearfilecache(repo, b'_bookmarks')
584 clearfilecache(repo, b'_bookmarks')
583 def d():
585 def d():
584 repo._bookmarks
586 repo._bookmarks
585 timer(d, setup=s)
587 timer(d, setup=s)
586 fm.end()
588 fm.end()
587
589
588 @command(b'perfbundleread', formatteropts, b'BUNDLE')
590 @command(b'perfbundleread', formatteropts, b'BUNDLE')
589 def perfbundleread(ui, repo, bundlepath, **opts):
591 def perfbundleread(ui, repo, bundlepath, **opts):
590 """Benchmark reading of bundle files.
592 """Benchmark reading of bundle files.
591
593
592 This command is meant to isolate the I/O part of bundle reading as
594 This command is meant to isolate the I/O part of bundle reading as
593 much as possible.
595 much as possible.
594 """
596 """
595 from mercurial import (
597 from mercurial import (
596 bundle2,
598 bundle2,
597 exchange,
599 exchange,
598 streamclone,
600 streamclone,
599 )
601 )
600
602
601 opts = _byteskwargs(opts)
603 opts = _byteskwargs(opts)
602
604
603 def makebench(fn):
605 def makebench(fn):
604 def run():
606 def run():
605 with open(bundlepath, b'rb') as fh:
607 with open(bundlepath, b'rb') as fh:
606 bundle = exchange.readbundle(ui, fh, bundlepath)
608 bundle = exchange.readbundle(ui, fh, bundlepath)
607 fn(bundle)
609 fn(bundle)
608
610
609 return run
611 return run
610
612
611 def makereadnbytes(size):
613 def makereadnbytes(size):
612 def run():
614 def run():
613 with open(bundlepath, b'rb') as fh:
615 with open(bundlepath, b'rb') as fh:
614 bundle = exchange.readbundle(ui, fh, bundlepath)
616 bundle = exchange.readbundle(ui, fh, bundlepath)
615 while bundle.read(size):
617 while bundle.read(size):
616 pass
618 pass
617
619
618 return run
620 return run
619
621
620 def makestdioread(size):
622 def makestdioread(size):
621 def run():
623 def run():
622 with open(bundlepath, b'rb') as fh:
624 with open(bundlepath, b'rb') as fh:
623 while fh.read(size):
625 while fh.read(size):
624 pass
626 pass
625
627
626 return run
628 return run
627
629
628 # bundle1
630 # bundle1
629
631
630 def deltaiter(bundle):
632 def deltaiter(bundle):
631 for delta in bundle.deltaiter():
633 for delta in bundle.deltaiter():
632 pass
634 pass
633
635
634 def iterchunks(bundle):
636 def iterchunks(bundle):
635 for chunk in bundle.getchunks():
637 for chunk in bundle.getchunks():
636 pass
638 pass
637
639
638 # bundle2
640 # bundle2
639
641
640 def forwardchunks(bundle):
642 def forwardchunks(bundle):
641 for chunk in bundle._forwardchunks():
643 for chunk in bundle._forwardchunks():
642 pass
644 pass
643
645
644 def iterparts(bundle):
646 def iterparts(bundle):
645 for part in bundle.iterparts():
647 for part in bundle.iterparts():
646 pass
648 pass
647
649
648 def iterpartsseekable(bundle):
650 def iterpartsseekable(bundle):
649 for part in bundle.iterparts(seekable=True):
651 for part in bundle.iterparts(seekable=True):
650 pass
652 pass
651
653
652 def seek(bundle):
654 def seek(bundle):
653 for part in bundle.iterparts(seekable=True):
655 for part in bundle.iterparts(seekable=True):
654 part.seek(0, os.SEEK_END)
656 part.seek(0, os.SEEK_END)
655
657
656 def makepartreadnbytes(size):
658 def makepartreadnbytes(size):
657 def run():
659 def run():
658 with open(bundlepath, b'rb') as fh:
660 with open(bundlepath, b'rb') as fh:
659 bundle = exchange.readbundle(ui, fh, bundlepath)
661 bundle = exchange.readbundle(ui, fh, bundlepath)
660 for part in bundle.iterparts():
662 for part in bundle.iterparts():
661 while part.read(size):
663 while part.read(size):
662 pass
664 pass
663
665
664 return run
666 return run
665
667
666 benches = [
668 benches = [
667 (makestdioread(8192), b'read(8k)'),
669 (makestdioread(8192), b'read(8k)'),
668 (makestdioread(16384), b'read(16k)'),
670 (makestdioread(16384), b'read(16k)'),
669 (makestdioread(32768), b'read(32k)'),
671 (makestdioread(32768), b'read(32k)'),
670 (makestdioread(131072), b'read(128k)'),
672 (makestdioread(131072), b'read(128k)'),
671 ]
673 ]
672
674
673 with open(bundlepath, b'rb') as fh:
675 with open(bundlepath, b'rb') as fh:
674 bundle = exchange.readbundle(ui, fh, bundlepath)
676 bundle = exchange.readbundle(ui, fh, bundlepath)
675
677
676 if isinstance(bundle, changegroup.cg1unpacker):
678 if isinstance(bundle, changegroup.cg1unpacker):
677 benches.extend([
679 benches.extend([
678 (makebench(deltaiter), b'cg1 deltaiter()'),
680 (makebench(deltaiter), b'cg1 deltaiter()'),
679 (makebench(iterchunks), b'cg1 getchunks()'),
681 (makebench(iterchunks), b'cg1 getchunks()'),
680 (makereadnbytes(8192), b'cg1 read(8k)'),
682 (makereadnbytes(8192), b'cg1 read(8k)'),
681 (makereadnbytes(16384), b'cg1 read(16k)'),
683 (makereadnbytes(16384), b'cg1 read(16k)'),
682 (makereadnbytes(32768), b'cg1 read(32k)'),
684 (makereadnbytes(32768), b'cg1 read(32k)'),
683 (makereadnbytes(131072), b'cg1 read(128k)'),
685 (makereadnbytes(131072), b'cg1 read(128k)'),
684 ])
686 ])
685 elif isinstance(bundle, bundle2.unbundle20):
687 elif isinstance(bundle, bundle2.unbundle20):
686 benches.extend([
688 benches.extend([
687 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
689 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
688 (makebench(iterparts), b'bundle2 iterparts()'),
690 (makebench(iterparts), b'bundle2 iterparts()'),
689 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
691 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
690 (makebench(seek), b'bundle2 part seek()'),
692 (makebench(seek), b'bundle2 part seek()'),
691 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
693 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
692 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
694 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
693 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
695 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
694 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
696 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
695 ])
697 ])
696 elif isinstance(bundle, streamclone.streamcloneapplier):
698 elif isinstance(bundle, streamclone.streamcloneapplier):
697 raise error.Abort(b'stream clone bundles not supported')
699 raise error.Abort(b'stream clone bundles not supported')
698 else:
700 else:
699 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
701 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
700
702
701 for fn, title in benches:
703 for fn, title in benches:
702 timer, fm = gettimer(ui, opts)
704 timer, fm = gettimer(ui, opts)
703 timer(fn, title=title)
705 timer(fn, title=title)
704 fm.end()
706 fm.end()
705
707
706 @command(b'perfchangegroupchangelog', formatteropts +
708 @command(b'perfchangegroupchangelog', formatteropts +
707 [(b'', b'version', b'02', b'changegroup version'),
709 [(b'', b'version', b'02', b'changegroup version'),
708 (b'r', b'rev', b'', b'revisions to add to changegroup')])
710 (b'r', b'rev', b'', b'revisions to add to changegroup')])
709 def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
711 def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
710 """Benchmark producing a changelog group for a changegroup.
712 """Benchmark producing a changelog group for a changegroup.
711
713
712 This measures the time spent processing the changelog during a
714 This measures the time spent processing the changelog during a
713 bundle operation. This occurs during `hg bundle` and on a server
715 bundle operation. This occurs during `hg bundle` and on a server
714 processing a `getbundle` wire protocol request (handles clones
716 processing a `getbundle` wire protocol request (handles clones
715 and pull requests).
717 and pull requests).
716
718
717 By default, all revisions are added to the changegroup.
719 By default, all revisions are added to the changegroup.
718 """
720 """
719 opts = _byteskwargs(opts)
721 opts = _byteskwargs(opts)
720 cl = repo.changelog
722 cl = repo.changelog
721 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
723 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
722 bundler = changegroup.getbundler(version, repo)
724 bundler = changegroup.getbundler(version, repo)
723
725
724 def d():
726 def d():
725 state, chunks = bundler._generatechangelog(cl, nodes)
727 state, chunks = bundler._generatechangelog(cl, nodes)
726 for chunk in chunks:
728 for chunk in chunks:
727 pass
729 pass
728
730
729 timer, fm = gettimer(ui, opts)
731 timer, fm = gettimer(ui, opts)
730
732
731 # Terminal printing can interfere with timing. So disable it.
733 # Terminal printing can interfere with timing. So disable it.
732 with ui.configoverride({(b'progress', b'disable'): True}):
734 with ui.configoverride({(b'progress', b'disable'): True}):
733 timer(d)
735 timer(d)
734
736
735 fm.end()
737 fm.end()
736
738
737 @command(b'perfdirs', formatteropts)
739 @command(b'perfdirs', formatteropts)
738 def perfdirs(ui, repo, **opts):
740 def perfdirs(ui, repo, **opts):
739 opts = _byteskwargs(opts)
741 opts = _byteskwargs(opts)
740 timer, fm = gettimer(ui, opts)
742 timer, fm = gettimer(ui, opts)
741 dirstate = repo.dirstate
743 dirstate = repo.dirstate
742 b'a' in dirstate
744 b'a' in dirstate
743 def d():
745 def d():
744 dirstate.hasdir(b'a')
746 dirstate.hasdir(b'a')
745 del dirstate._map._dirs
747 del dirstate._map._dirs
746 timer(d)
748 timer(d)
747 fm.end()
749 fm.end()
748
750
749 @command(b'perfdirstate', formatteropts)
751 @command(b'perfdirstate', formatteropts)
750 def perfdirstate(ui, repo, **opts):
752 def perfdirstate(ui, repo, **opts):
751 opts = _byteskwargs(opts)
753 opts = _byteskwargs(opts)
752 timer, fm = gettimer(ui, opts)
754 timer, fm = gettimer(ui, opts)
753 b"a" in repo.dirstate
755 b"a" in repo.dirstate
754 def d():
756 def d():
755 repo.dirstate.invalidate()
757 repo.dirstate.invalidate()
756 b"a" in repo.dirstate
758 b"a" in repo.dirstate
757 timer(d)
759 timer(d)
758 fm.end()
760 fm.end()
759
761
760 @command(b'perfdirstatedirs', formatteropts)
762 @command(b'perfdirstatedirs', formatteropts)
761 def perfdirstatedirs(ui, repo, **opts):
763 def perfdirstatedirs(ui, repo, **opts):
762 opts = _byteskwargs(opts)
764 opts = _byteskwargs(opts)
763 timer, fm = gettimer(ui, opts)
765 timer, fm = gettimer(ui, opts)
764 b"a" in repo.dirstate
766 b"a" in repo.dirstate
765 def d():
767 def d():
766 repo.dirstate.hasdir(b"a")
768 repo.dirstate.hasdir(b"a")
767 del repo.dirstate._map._dirs
769 del repo.dirstate._map._dirs
768 timer(d)
770 timer(d)
769 fm.end()
771 fm.end()
770
772
771 @command(b'perfdirstatefoldmap', formatteropts)
773 @command(b'perfdirstatefoldmap', formatteropts)
772 def perfdirstatefoldmap(ui, repo, **opts):
774 def perfdirstatefoldmap(ui, repo, **opts):
773 opts = _byteskwargs(opts)
775 opts = _byteskwargs(opts)
774 timer, fm = gettimer(ui, opts)
776 timer, fm = gettimer(ui, opts)
775 dirstate = repo.dirstate
777 dirstate = repo.dirstate
776 b'a' in dirstate
778 b'a' in dirstate
777 def d():
779 def d():
778 dirstate._map.filefoldmap.get(b'a')
780 dirstate._map.filefoldmap.get(b'a')
779 del dirstate._map.filefoldmap
781 del dirstate._map.filefoldmap
780 timer(d)
782 timer(d)
781 fm.end()
783 fm.end()
782
784
783 @command(b'perfdirfoldmap', formatteropts)
785 @command(b'perfdirfoldmap', formatteropts)
784 def perfdirfoldmap(ui, repo, **opts):
786 def perfdirfoldmap(ui, repo, **opts):
785 opts = _byteskwargs(opts)
787 opts = _byteskwargs(opts)
786 timer, fm = gettimer(ui, opts)
788 timer, fm = gettimer(ui, opts)
787 dirstate = repo.dirstate
789 dirstate = repo.dirstate
788 b'a' in dirstate
790 b'a' in dirstate
789 def d():
791 def d():
790 dirstate._map.dirfoldmap.get(b'a')
792 dirstate._map.dirfoldmap.get(b'a')
791 del dirstate._map.dirfoldmap
793 del dirstate._map.dirfoldmap
792 del dirstate._map._dirs
794 del dirstate._map._dirs
793 timer(d)
795 timer(d)
794 fm.end()
796 fm.end()
795
797
796 @command(b'perfdirstatewrite', formatteropts)
798 @command(b'perfdirstatewrite', formatteropts)
797 def perfdirstatewrite(ui, repo, **opts):
799 def perfdirstatewrite(ui, repo, **opts):
798 opts = _byteskwargs(opts)
800 opts = _byteskwargs(opts)
799 timer, fm = gettimer(ui, opts)
801 timer, fm = gettimer(ui, opts)
800 ds = repo.dirstate
802 ds = repo.dirstate
801 b"a" in ds
803 b"a" in ds
802 def d():
804 def d():
803 ds._dirty = True
805 ds._dirty = True
804 ds.write(repo.currenttransaction())
806 ds.write(repo.currenttransaction())
805 timer(d)
807 timer(d)
806 fm.end()
808 fm.end()
807
809
808 @command(b'perfmergecalculate',
810 @command(b'perfmergecalculate',
809 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
811 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
810 def perfmergecalculate(ui, repo, rev, **opts):
812 def perfmergecalculate(ui, repo, rev, **opts):
811 opts = _byteskwargs(opts)
813 opts = _byteskwargs(opts)
812 timer, fm = gettimer(ui, opts)
814 timer, fm = gettimer(ui, opts)
813 wctx = repo[None]
815 wctx = repo[None]
814 rctx = scmutil.revsingle(repo, rev, rev)
816 rctx = scmutil.revsingle(repo, rev, rev)
815 ancestor = wctx.ancestor(rctx)
817 ancestor = wctx.ancestor(rctx)
816 # we don't want working dir files to be stat'd in the benchmark, so prime
818 # we don't want working dir files to be stat'd in the benchmark, so prime
817 # that cache
819 # that cache
818 wctx.dirty()
820 wctx.dirty()
819 def d():
821 def d():
820 # acceptremote is True because we don't want prompts in the middle of
822 # acceptremote is True because we don't want prompts in the middle of
821 # our benchmark
823 # our benchmark
822 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
824 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
823 acceptremote=True, followcopies=True)
825 acceptremote=True, followcopies=True)
824 timer(d)
826 timer(d)
825 fm.end()
827 fm.end()
826
828
827 @command(b'perfpathcopies', [], b"REV REV")
829 @command(b'perfpathcopies', [], b"REV REV")
828 def perfpathcopies(ui, repo, rev1, rev2, **opts):
830 def perfpathcopies(ui, repo, rev1, rev2, **opts):
829 opts = _byteskwargs(opts)
831 opts = _byteskwargs(opts)
830 timer, fm = gettimer(ui, opts)
832 timer, fm = gettimer(ui, opts)
831 ctx1 = scmutil.revsingle(repo, rev1, rev1)
833 ctx1 = scmutil.revsingle(repo, rev1, rev1)
832 ctx2 = scmutil.revsingle(repo, rev2, rev2)
834 ctx2 = scmutil.revsingle(repo, rev2, rev2)
833 def d():
835 def d():
834 copies.pathcopies(ctx1, ctx2)
836 copies.pathcopies(ctx1, ctx2)
835 timer(d)
837 timer(d)
836 fm.end()
838 fm.end()
837
839
838 @command(b'perfphases',
840 @command(b'perfphases',
839 [(b'', b'full', False, b'include file reading time too'),
841 [(b'', b'full', False, b'include file reading time too'),
840 ], b"")
842 ], b"")
841 def perfphases(ui, repo, **opts):
843 def perfphases(ui, repo, **opts):
842 """benchmark phasesets computation"""
844 """benchmark phasesets computation"""
843 opts = _byteskwargs(opts)
845 opts = _byteskwargs(opts)
844 timer, fm = gettimer(ui, opts)
846 timer, fm = gettimer(ui, opts)
845 _phases = repo._phasecache
847 _phases = repo._phasecache
846 full = opts.get(b'full')
848 full = opts.get(b'full')
847 def d():
849 def d():
848 phases = _phases
850 phases = _phases
849 if full:
851 if full:
850 clearfilecache(repo, b'_phasecache')
852 clearfilecache(repo, b'_phasecache')
851 phases = repo._phasecache
853 phases = repo._phasecache
852 phases.invalidate()
854 phases.invalidate()
853 phases.loadphaserevs(repo)
855 phases.loadphaserevs(repo)
854 timer(d)
856 timer(d)
855 fm.end()
857 fm.end()
856
858
857 @command(b'perfphasesremote',
859 @command(b'perfphasesremote',
858 [], b"[DEST]")
860 [], b"[DEST]")
859 def perfphasesremote(ui, repo, dest=None, **opts):
861 def perfphasesremote(ui, repo, dest=None, **opts):
860 """benchmark time needed to analyse phases of the remote server"""
862 """benchmark time needed to analyse phases of the remote server"""
861 from mercurial.node import (
863 from mercurial.node import (
862 bin,
864 bin,
863 )
865 )
864 from mercurial import (
866 from mercurial import (
865 exchange,
867 exchange,
866 hg,
868 hg,
867 phases,
869 phases,
868 )
870 )
869 opts = _byteskwargs(opts)
871 opts = _byteskwargs(opts)
870 timer, fm = gettimer(ui, opts)
872 timer, fm = gettimer(ui, opts)
871
873
872 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
874 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
873 if not path:
875 if not path:
874 raise error.Abort((b'default repository not configured!'),
876 raise error.Abort((b'default repository not configured!'),
875 hint=(b"see 'hg help config.paths'"))
877 hint=(b"see 'hg help config.paths'"))
876 dest = path.pushloc or path.loc
878 dest = path.pushloc or path.loc
877 branches = (path.branch, opts.get(b'branch') or [])
879 branches = (path.branch, opts.get(b'branch') or [])
878 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
880 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
879 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
881 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
880 other = hg.peer(repo, opts, dest)
882 other = hg.peer(repo, opts, dest)
881
883
882 # easier to perform discovery through the operation
884 # easier to perform discovery through the operation
883 op = exchange.pushoperation(repo, other)
885 op = exchange.pushoperation(repo, other)
884 exchange._pushdiscoverychangeset(op)
886 exchange._pushdiscoverychangeset(op)
885
887
886 remotesubset = op.fallbackheads
888 remotesubset = op.fallbackheads
887
889
888 with other.commandexecutor() as e:
890 with other.commandexecutor() as e:
889 remotephases = e.callcommand(b'listkeys',
891 remotephases = e.callcommand(b'listkeys',
890 {b'namespace': b'phases'}).result()
892 {b'namespace': b'phases'}).result()
891 del other
893 del other
892 publishing = remotephases.get(b'publishing', False)
894 publishing = remotephases.get(b'publishing', False)
893 if publishing:
895 if publishing:
894 ui.status((b'publishing: yes\n'))
896 ui.status((b'publishing: yes\n'))
895 else:
897 else:
896 ui.status((b'publishing: no\n'))
898 ui.status((b'publishing: no\n'))
897
899
898 nodemap = repo.changelog.nodemap
900 nodemap = repo.changelog.nodemap
899 nonpublishroots = 0
901 nonpublishroots = 0
900 for nhex, phase in remotephases.iteritems():
902 for nhex, phase in remotephases.iteritems():
901 if nhex == b'publishing': # ignore data related to publish option
903 if nhex == b'publishing': # ignore data related to publish option
902 continue
904 continue
903 node = bin(nhex)
905 node = bin(nhex)
904 if node in nodemap and int(phase):
906 if node in nodemap and int(phase):
905 nonpublishroots += 1
907 nonpublishroots += 1
906 ui.status((b'number of roots: %d\n') % len(remotephases))
908 ui.status((b'number of roots: %d\n') % len(remotephases))
907 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
909 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
908 def d():
910 def d():
909 phases.remotephasessummary(repo,
911 phases.remotephasessummary(repo,
910 remotesubset,
912 remotesubset,
911 remotephases)
913 remotephases)
912 timer(d)
914 timer(d)
913 fm.end()
915 fm.end()
914
916
915 @command(b'perfmanifest',[
917 @command(b'perfmanifest',[
916 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
918 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
917 (b'', b'clear-disk', False, b'clear on-disk caches too'),
919 (b'', b'clear-disk', False, b'clear on-disk caches too'),
918 ] + formatteropts, b'REV|NODE')
920 ] + formatteropts, b'REV|NODE')
919 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
921 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
920 """benchmark the time to read a manifest from disk and return a usable
922 """benchmark the time to read a manifest from disk and return a usable
921 dict-like object
923 dict-like object
922
924
923 Manifest caches are cleared before retrieval."""
925 Manifest caches are cleared before retrieval."""
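# Usage sketch (revision values are illustrative):
#   $ hg perfmanifest tip
#   $ hg perfmanifest -m --clear-disk 0   # treat the argument as a manifest rev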
924 opts = _byteskwargs(opts)
926 opts = _byteskwargs(opts)
925 timer, fm = gettimer(ui, opts)
927 timer, fm = gettimer(ui, opts)
926 if not manifest_rev:
928 if not manifest_rev:
927 ctx = scmutil.revsingle(repo, rev, rev)
929 ctx = scmutil.revsingle(repo, rev, rev)
928 t = ctx.manifestnode()
930 t = ctx.manifestnode()
929 else:
931 else:
930 from mercurial.node import bin
932 from mercurial.node import bin
931
933
932 if len(rev) == 40:
934 if len(rev) == 40:
933 t = bin(rev)
935 t = bin(rev)
934 else:
936 else:
935 try:
937 try:
936 rev = int(rev)
938 rev = int(rev)
937
939
938 if util.safehasattr(repo.manifestlog, b'getstorage'):
940 if util.safehasattr(repo.manifestlog, b'getstorage'):
939 t = repo.manifestlog.getstorage(b'').node(rev)
941 t = repo.manifestlog.getstorage(b'').node(rev)
940 else:
942 else:
941 t = repo.manifestlog._revlog.lookup(rev)
943 t = repo.manifestlog._revlog.lookup(rev)
942 except ValueError:
944 except ValueError:
943 raise error.Abort(b'manifest revision must be integer or full '
945 raise error.Abort(b'manifest revision must be integer or full '
944 b'node')
946 b'node')
945 def d():
947 def d():
946 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
948 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
947 repo.manifestlog[t].read()
949 repo.manifestlog[t].read()
948 timer(d)
950 timer(d)
949 fm.end()
951 fm.end()
950
952
951 @command(b'perfchangeset', formatteropts)
953 @command(b'perfchangeset', formatteropts)
952 def perfchangeset(ui, repo, rev, **opts):
954 def perfchangeset(ui, repo, rev, **opts):
953 opts = _byteskwargs(opts)
955 opts = _byteskwargs(opts)
954 timer, fm = gettimer(ui, opts)
956 timer, fm = gettimer(ui, opts)
955 n = scmutil.revsingle(repo, rev).node()
957 n = scmutil.revsingle(repo, rev).node()
956 def d():
958 def d():
957 repo.changelog.read(n)
959 repo.changelog.read(n)
958 #repo.changelog._cache = None
960 #repo.changelog._cache = None
959 timer(d)
961 timer(d)
960 fm.end()
962 fm.end()
961
963
962 @command(b'perfindex', formatteropts)
964 @command(b'perfindex', formatteropts)
963 def perfindex(ui, repo, **opts):
965 def perfindex(ui, repo, **opts):
964 import mercurial.revlog
966 import mercurial.revlog
965 opts = _byteskwargs(opts)
967 opts = _byteskwargs(opts)
966 timer, fm = gettimer(ui, opts)
968 timer, fm = gettimer(ui, opts)
967 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
969 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
968 n = repo[b"tip"].node()
970 n = repo[b"tip"].node()
969 svfs = getsvfs(repo)
971 svfs = getsvfs(repo)
970 def d():
972 def d():
971 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
973 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
972 cl.rev(n)
974 cl.rev(n)
973 timer(d)
975 timer(d)
974 fm.end()
976 fm.end()
975
977
976 @command(b'perfstartup', formatteropts)
978 @command(b'perfstartup', formatteropts)
977 def perfstartup(ui, repo, **opts):
979 def perfstartup(ui, repo, **opts):
978 opts = _byteskwargs(opts)
980 opts = _byteskwargs(opts)
979 timer, fm = gettimer(ui, opts)
981 timer, fm = gettimer(ui, opts)
980 def d():
982 def d():
981 if os.name != r'nt':
983 if os.name != r'nt':
982 os.system(b"HGRCPATH= %s version -q > /dev/null" %
984 os.system(b"HGRCPATH= %s version -q > /dev/null" %
983 fsencode(sys.argv[0]))
985 fsencode(sys.argv[0]))
984 else:
986 else:
985 os.environ[r'HGRCPATH'] = r' '
987 os.environ[r'HGRCPATH'] = r' '
986 os.system(r"%s version -q > NUL" % sys.argv[0])
988 os.system(r"%s version -q > NUL" % sys.argv[0])
987 timer(d)
989 timer(d)
988 fm.end()
990 fm.end()
989
991
990 @command(b'perfparents', formatteropts)
992 @command(b'perfparents', formatteropts)
991 def perfparents(ui, repo, **opts):
993 def perfparents(ui, repo, **opts):
992 opts = _byteskwargs(opts)
994 opts = _byteskwargs(opts)
993 timer, fm = gettimer(ui, opts)
995 timer, fm = gettimer(ui, opts)
994 # control the number of commits perfparents iterates over
996 # control the number of commits perfparents iterates over
995 # experimental config: perf.parentscount
997 # experimental config: perf.parentscount
996 count = getint(ui, b"perf", b"parentscount", 1000)
998 count = getint(ui, b"perf", b"parentscount", 1000)
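# e.g. in an hgrc, to shrink the sample on small repositories (value illustrative):
#   [perf]
#   parentscount = 100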
997 if len(repo.changelog) < count:
999 if len(repo.changelog) < count:
998 raise error.Abort(b"repo needs %d commits for this test" % count)
1000 raise error.Abort(b"repo needs %d commits for this test" % count)
999 repo = repo.unfiltered()
1001 repo = repo.unfiltered()
1000 nl = [repo.changelog.node(i) for i in _xrange(count)]
1002 nl = [repo.changelog.node(i) for i in _xrange(count)]
1001 def d():
1003 def d():
1002 for n in nl:
1004 for n in nl:
1003 repo.changelog.parents(n)
1005 repo.changelog.parents(n)
1004 timer(d)
1006 timer(d)
1005 fm.end()
1007 fm.end()
1006
1008
1007 @command(b'perfctxfiles', formatteropts)
1009 @command(b'perfctxfiles', formatteropts)
1008 def perfctxfiles(ui, repo, x, **opts):
1010 def perfctxfiles(ui, repo, x, **opts):
1009 opts = _byteskwargs(opts)
1011 opts = _byteskwargs(opts)
1010 x = int(x)
1012 x = int(x)
1011 timer, fm = gettimer(ui, opts)
1013 timer, fm = gettimer(ui, opts)
1012 def d():
1014 def d():
1013 len(repo[x].files())
1015 len(repo[x].files())
1014 timer(d)
1016 timer(d)
1015 fm.end()
1017 fm.end()
1016
1018
1017 @command(b'perfrawfiles', formatteropts)
1019 @command(b'perfrawfiles', formatteropts)
1018 def perfrawfiles(ui, repo, x, **opts):
1020 def perfrawfiles(ui, repo, x, **opts):
1019 opts = _byteskwargs(opts)
1021 opts = _byteskwargs(opts)
1020 x = int(x)
1022 x = int(x)
1021 timer, fm = gettimer(ui, opts)
1023 timer, fm = gettimer(ui, opts)
1022 cl = repo.changelog
1024 cl = repo.changelog
1023 def d():
1025 def d():
1024 len(cl.read(x)[3])
1026 len(cl.read(x)[3])
1025 timer(d)
1027 timer(d)
1026 fm.end()
1028 fm.end()
1027
1029
1028 @command(b'perflookup', formatteropts)
1030 @command(b'perflookup', formatteropts)
1029 def perflookup(ui, repo, rev, **opts):
1031 def perflookup(ui, repo, rev, **opts):
1030 opts = _byteskwargs(opts)
1032 opts = _byteskwargs(opts)
1031 timer, fm = gettimer(ui, opts)
1033 timer, fm = gettimer(ui, opts)
1032 timer(lambda: len(repo.lookup(rev)))
1034 timer(lambda: len(repo.lookup(rev)))
1033 fm.end()
1035 fm.end()
1034
1036
1035 @command(b'perflinelogedits',
1037 @command(b'perflinelogedits',
1036 [(b'n', b'edits', 10000, b'number of edits'),
1038 [(b'n', b'edits', 10000, b'number of edits'),
1037 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1039 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1038 ], norepo=True)
1040 ], norepo=True)
1039 def perflinelogedits(ui, **opts):
1041 def perflinelogedits(ui, **opts):
1040 from mercurial import linelog
1042 from mercurial import linelog
1041
1043
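# Usage sketch (edit counts are illustrative); this command runs without a repository:
#   $ hg perflinelogedits -n 100000 --max-hunk-lines 20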
1042 opts = _byteskwargs(opts)
1044 opts = _byteskwargs(opts)
1043
1045
1044 edits = opts[b'edits']
1046 edits = opts[b'edits']
1045 maxhunklines = opts[b'max_hunk_lines']
1047 maxhunklines = opts[b'max_hunk_lines']
1046
1048
1047 maxb1 = 100000
1049 maxb1 = 100000
1048 random.seed(0)
1050 random.seed(0)
1049 randint = random.randint
1051 randint = random.randint
1050 currentlines = 0
1052 currentlines = 0
1051 arglist = []
1053 arglist = []
1052 for rev in _xrange(edits):
1054 for rev in _xrange(edits):
1053 a1 = randint(0, currentlines)
1055 a1 = randint(0, currentlines)
1054 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1056 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1055 b1 = randint(0, maxb1)
1057 b1 = randint(0, maxb1)
1056 b2 = randint(b1, b1 + maxhunklines)
1058 b2 = randint(b1, b1 + maxhunklines)
1057 currentlines += (b2 - b1) - (a2 - a1)
1059 currentlines += (b2 - b1) - (a2 - a1)
1058 arglist.append((rev, a1, a2, b1, b2))
1060 arglist.append((rev, a1, a2, b1, b2))
1059
1061
1060 def d():
1062 def d():
1061 ll = linelog.linelog()
1063 ll = linelog.linelog()
1062 for args in arglist:
1064 for args in arglist:
1063 ll.replacelines(*args)
1065 ll.replacelines(*args)
1064
1066
1065 timer, fm = gettimer(ui, opts)
1067 timer, fm = gettimer(ui, opts)
1066 timer(d)
1068 timer(d)
1067 fm.end()
1069 fm.end()
1068
1070
1069 @command(b'perfrevrange', formatteropts)
1071 @command(b'perfrevrange', formatteropts)
1070 def perfrevrange(ui, repo, *specs, **opts):
1072 def perfrevrange(ui, repo, *specs, **opts):
1071 opts = _byteskwargs(opts)
1073 opts = _byteskwargs(opts)
1072 timer, fm = gettimer(ui, opts)
1074 timer, fm = gettimer(ui, opts)
1073 revrange = scmutil.revrange
1075 revrange = scmutil.revrange
1074 timer(lambda: len(revrange(repo, specs)))
1076 timer(lambda: len(revrange(repo, specs)))
1075 fm.end()
1077 fm.end()
1076
1078
1077 @command(b'perfnodelookup', formatteropts)
1079 @command(b'perfnodelookup', formatteropts)
1078 def perfnodelookup(ui, repo, rev, **opts):
1080 def perfnodelookup(ui, repo, rev, **opts):
1079 opts = _byteskwargs(opts)
1081 opts = _byteskwargs(opts)
1080 timer, fm = gettimer(ui, opts)
1082 timer, fm = gettimer(ui, opts)
1081 import mercurial.revlog
1083 import mercurial.revlog
1082 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1084 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1083 n = scmutil.revsingle(repo, rev).node()
1085 n = scmutil.revsingle(repo, rev).node()
1084 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1086 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1085 def d():
1087 def d():
1086 cl.rev(n)
1088 cl.rev(n)
1087 clearcaches(cl)
1089 clearcaches(cl)
1088 timer(d)
1090 timer(d)
1089 fm.end()
1091 fm.end()
1090
1092
1091 @command(b'perflog',
1093 @command(b'perflog',
1092 [(b'', b'rename', False, b'ask log to follow renames')
1094 [(b'', b'rename', False, b'ask log to follow renames')
1093 ] + formatteropts)
1095 ] + formatteropts)
1094 def perflog(ui, repo, rev=None, **opts):
1096 def perflog(ui, repo, rev=None, **opts):
1095 opts = _byteskwargs(opts)
1097 opts = _byteskwargs(opts)
1096 if rev is None:
1098 if rev is None:
1097 rev = []
1099 rev = []
1098 timer, fm = gettimer(ui, opts)
1100 timer, fm = gettimer(ui, opts)
1099 ui.pushbuffer()
1101 ui.pushbuffer()
1100 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1102 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1101 copies=opts.get(b'rename')))
1103 copies=opts.get(b'rename')))
1102 ui.popbuffer()
1104 ui.popbuffer()
1103 fm.end()
1105 fm.end()
1104
1106
1105 @command(b'perfmoonwalk', formatteropts)
1107 @command(b'perfmoonwalk', formatteropts)
1106 def perfmoonwalk(ui, repo, **opts):
1108 def perfmoonwalk(ui, repo, **opts):
1107 """benchmark walking the changelog backwards
1109 """benchmark walking the changelog backwards
1108
1110
1109 This also loads the changelog data for each revision in the changelog.
1111 This also loads the changelog data for each revision in the changelog.
1110 """
1112 """
1111 opts = _byteskwargs(opts)
1113 opts = _byteskwargs(opts)
1112 timer, fm = gettimer(ui, opts)
1114 timer, fm = gettimer(ui, opts)
1113 def moonwalk():
1115 def moonwalk():
1114 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1116 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1115 ctx = repo[i]
1117 ctx = repo[i]
1116 ctx.branch() # read changelog data (in addition to the index)
1118 ctx.branch() # read changelog data (in addition to the index)
1117 timer(moonwalk)
1119 timer(moonwalk)
1118 fm.end()
1120 fm.end()
1119
1121
1120 @command(b'perftemplating',
1122 @command(b'perftemplating',
1121 [(b'r', b'rev', [], b'revisions to run the template on'),
1123 [(b'r', b'rev', [], b'revisions to run the template on'),
1122 ] + formatteropts)
1124 ] + formatteropts)
1123 def perftemplating(ui, repo, testedtemplate=None, **opts):
1125 def perftemplating(ui, repo, testedtemplate=None, **opts):
1124 """test the rendering time of a given template"""
1126 """test the rendering time of a given template"""
1125 if makelogtemplater is None:
1127 if makelogtemplater is None:
1126 raise error.Abort((b"perftemplating not available with this Mercurial"),
1128 raise error.Abort((b"perftemplating not available with this Mercurial"),
1127 hint=b"use 4.3 or later")
1129 hint=b"use 4.3 or later")
1128
1130
1129 opts = _byteskwargs(opts)
1131 opts = _byteskwargs(opts)
1130
1132
1131 nullui = ui.copy()
1133 nullui = ui.copy()
1132 nullui.fout = open(os.devnull, r'wb')
1134 nullui.fout = open(os.devnull, r'wb')
1133 nullui.disablepager()
1135 nullui.disablepager()
1134 revs = opts.get(b'rev')
1136 revs = opts.get(b'rev')
1135 if not revs:
1137 if not revs:
1136 revs = [b'all()']
1138 revs = [b'all()']
1137 revs = list(scmutil.revrange(repo, revs))
1139 revs = list(scmutil.revrange(repo, revs))
1138
1140
1139 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1141 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1140 b' {author|person}: {desc|firstline}\n')
1142 b' {author|person}: {desc|firstline}\n')
1141 if testedtemplate is None:
1143 if testedtemplate is None:
1142 testedtemplate = defaulttemplate
1144 testedtemplate = defaulttemplate
1143 displayer = makelogtemplater(nullui, repo, testedtemplate)
1145 displayer = makelogtemplater(nullui, repo, testedtemplate)
1144 def format():
1146 def format():
1145 for r in revs:
1147 for r in revs:
1146 ctx = repo[r]
1148 ctx = repo[r]
1147 displayer.show(ctx)
1149 displayer.show(ctx)
1148 displayer.flush(ctx)
1150 displayer.flush(ctx)
1149
1151
1150 timer, fm = gettimer(ui, opts)
1152 timer, fm = gettimer(ui, opts)
1151 timer(format)
1153 timer(format)
1152 fm.end()
1154 fm.end()
1153
1155
1154 @command(b'perfcca', formatteropts)
1156 @command(b'perfcca', formatteropts)
1155 def perfcca(ui, repo, **opts):
1157 def perfcca(ui, repo, **opts):
1156 opts = _byteskwargs(opts)
1158 opts = _byteskwargs(opts)
1157 timer, fm = gettimer(ui, opts)
1159 timer, fm = gettimer(ui, opts)
1158 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1160 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1159 fm.end()
1161 fm.end()
1160
1162
1161 @command(b'perffncacheload', formatteropts)
1163 @command(b'perffncacheload', formatteropts)
1162 def perffncacheload(ui, repo, **opts):
1164 def perffncacheload(ui, repo, **opts):
1163 opts = _byteskwargs(opts)
1165 opts = _byteskwargs(opts)
1164 timer, fm = gettimer(ui, opts)
1166 timer, fm = gettimer(ui, opts)
1165 s = repo.store
1167 s = repo.store
1166 def d():
1168 def d():
1167 s.fncache._load()
1169 s.fncache._load()
1168 timer(d)
1170 timer(d)
1169 fm.end()
1171 fm.end()
1170
1172
1171 @command(b'perffncachewrite', formatteropts)
1173 @command(b'perffncachewrite', formatteropts)
1172 def perffncachewrite(ui, repo, **opts):
1174 def perffncachewrite(ui, repo, **opts):
1173 opts = _byteskwargs(opts)
1175 opts = _byteskwargs(opts)
1174 timer, fm = gettimer(ui, opts)
1176 timer, fm = gettimer(ui, opts)
1175 s = repo.store
1177 s = repo.store
1176 lock = repo.lock()
1178 lock = repo.lock()
1177 s.fncache._load()
1179 s.fncache._load()
1178 tr = repo.transaction(b'perffncachewrite')
1180 tr = repo.transaction(b'perffncachewrite')
1179 tr.addbackup(b'fncache')
1181 tr.addbackup(b'fncache')
1180 def d():
1182 def d():
1181 s.fncache._dirty = True
1183 s.fncache._dirty = True
1182 s.fncache.write(tr)
1184 s.fncache.write(tr)
1183 timer(d)
1185 timer(d)
1184 tr.close()
1186 tr.close()
1185 lock.release()
1187 lock.release()
1186 fm.end()
1188 fm.end()
1187
1189
1188 @command(b'perffncacheencode', formatteropts)
1190 @command(b'perffncacheencode', formatteropts)
1189 def perffncacheencode(ui, repo, **opts):
1191 def perffncacheencode(ui, repo, **opts):
1190 opts = _byteskwargs(opts)
1192 opts = _byteskwargs(opts)
1191 timer, fm = gettimer(ui, opts)
1193 timer, fm = gettimer(ui, opts)
1192 s = repo.store
1194 s = repo.store
1193 s.fncache._load()
1195 s.fncache._load()
1194 def d():
1196 def d():
1195 for p in s.fncache.entries:
1197 for p in s.fncache.entries:
1196 s.encode(p)
1198 s.encode(p)
1197 timer(d)
1199 timer(d)
1198 fm.end()
1200 fm.end()
1199
1201
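# Worker used by perfbdiff --threads: each thread drains (text1, text2) pairs
# from the queue until it sees a None sentinel, then parks on the `ready`
# condition so the pool can be reused for the next timed run.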
1200 def _bdiffworker(q, blocks, xdiff, ready, done):
1202 def _bdiffworker(q, blocks, xdiff, ready, done):
1201 while not done.is_set():
1203 while not done.is_set():
1202 pair = q.get()
1204 pair = q.get()
1203 while pair is not None:
1205 while pair is not None:
1204 if xdiff:
1206 if xdiff:
1205 mdiff.bdiff.xdiffblocks(*pair)
1207 mdiff.bdiff.xdiffblocks(*pair)
1206 elif blocks:
1208 elif blocks:
1207 mdiff.bdiff.blocks(*pair)
1209 mdiff.bdiff.blocks(*pair)
1208 else:
1210 else:
1209 mdiff.textdiff(*pair)
1211 mdiff.textdiff(*pair)
1210 q.task_done()
1212 q.task_done()
1211 pair = q.get()
1213 pair = q.get()
1212 q.task_done() # for the None one
1214 q.task_done() # for the None one
1213 with ready:
1215 with ready:
1214 ready.wait()
1216 ready.wait()
1215
1217
1216 def _manifestrevision(repo, mnode):
1218 def _manifestrevision(repo, mnode):
1217 ml = repo.manifestlog
1219 ml = repo.manifestlog
1218
1220
1219 if util.safehasattr(ml, b'getstorage'):
1221 if util.safehasattr(ml, b'getstorage'):
1220 store = ml.getstorage(b'')
1222 store = ml.getstorage(b'')
1221 else:
1223 else:
1222 store = ml._revlog
1224 store = ml._revlog
1223
1225
1224 return store.revision(mnode)
1226 return store.revision(mnode)
1225
1227
1226 @command(b'perfbdiff', revlogopts + formatteropts + [
1228 @command(b'perfbdiff', revlogopts + formatteropts + [
1227 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1229 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1228 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1230 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1229 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1231 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1230 (b'', b'blocks', False, b'test computing diffs into blocks'),
1232 (b'', b'blocks', False, b'test computing diffs into blocks'),
1231 (b'', b'xdiff', False, b'use xdiff algorithm'),
1233 (b'', b'xdiff', False, b'use xdiff algorithm'),
1232 ],
1234 ],
1233
1235
1234 b'-c|-m|FILE REV')
1236 b'-c|-m|FILE REV')
1235 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1237 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1236 """benchmark a bdiff between revisions
1238 """benchmark a bdiff between revisions
1237
1239
1238 By default, benchmark a bdiff between its delta parent and itself.
1240 By default, benchmark a bdiff between its delta parent and itself.
1239
1241
1240 With ``--count``, benchmark bdiffs between delta parents and self for N
1242 With ``--count``, benchmark bdiffs between delta parents and self for N
1241 revisions starting at the specified revision.
1243 revisions starting at the specified revision.
1242
1244
1243 With ``--alldata``, assume the requested revision is a changeset and
1245 With ``--alldata``, assume the requested revision is a changeset and
1244 measure bdiffs for all changes related to that changeset (manifest
1246 measure bdiffs for all changes related to that changeset (manifest
1245 and filelogs).
1247 and filelogs).
1246 """
1248 """
1247 opts = _byteskwargs(opts)
1249 opts = _byteskwargs(opts)
1248
1250
1249 if opts[b'xdiff'] and not opts[b'blocks']:
1251 if opts[b'xdiff'] and not opts[b'blocks']:
1250 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1252 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1251
1253
1252 if opts[b'alldata']:
1254 if opts[b'alldata']:
1253 opts[b'changelog'] = True
1255 opts[b'changelog'] = True
1254
1256
1255 if opts.get(b'changelog') or opts.get(b'manifest'):
1257 if opts.get(b'changelog') or opts.get(b'manifest'):
1256 file_, rev = None, file_
1258 file_, rev = None, file_
1257 elif rev is None:
1259 elif rev is None:
1258 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1260 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1259
1261
1260 blocks = opts[b'blocks']
1262 blocks = opts[b'blocks']
1261 xdiff = opts[b'xdiff']
1263 xdiff = opts[b'xdiff']
1262 textpairs = []
1264 textpairs = []
1263
1265
1264 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1266 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1265
1267
1266 startrev = r.rev(r.lookup(rev))
1268 startrev = r.rev(r.lookup(rev))
1267 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1269 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1268 if opts[b'alldata']:
1270 if opts[b'alldata']:
1269 # Load revisions associated with changeset.
1271 # Load revisions associated with changeset.
1270 ctx = repo[rev]
1272 ctx = repo[rev]
1271 mtext = _manifestrevision(repo, ctx.manifestnode())
1273 mtext = _manifestrevision(repo, ctx.manifestnode())
1272 for pctx in ctx.parents():
1274 for pctx in ctx.parents():
1273 pman = _manifestrevision(repo, pctx.manifestnode())
1275 pman = _manifestrevision(repo, pctx.manifestnode())
1274 textpairs.append((pman, mtext))
1276 textpairs.append((pman, mtext))
1275
1277
1276 # Load filelog revisions by iterating manifest delta.
1278 # Load filelog revisions by iterating manifest delta.
1277 man = ctx.manifest()
1279 man = ctx.manifest()
1278 pman = ctx.p1().manifest()
1280 pman = ctx.p1().manifest()
1279 for filename, change in pman.diff(man).items():
1281 for filename, change in pman.diff(man).items():
1280 fctx = repo.file(filename)
1282 fctx = repo.file(filename)
1281 f1 = fctx.revision(change[0][0] or -1)
1283 f1 = fctx.revision(change[0][0] or -1)
1282 f2 = fctx.revision(change[1][0] or -1)
1284 f2 = fctx.revision(change[1][0] or -1)
1283 textpairs.append((f1, f2))
1285 textpairs.append((f1, f2))
1284 else:
1286 else:
1285 dp = r.deltaparent(rev)
1287 dp = r.deltaparent(rev)
1286 textpairs.append((r.revision(dp), r.revision(rev)))
1288 textpairs.append((r.revision(dp), r.revision(rev)))
1287
1289
1288 withthreads = threads > 0
1290 withthreads = threads > 0
1289 if not withthreads:
1291 if not withthreads:
1290 def d():
1292 def d():
1291 for pair in textpairs:
1293 for pair in textpairs:
1292 if xdiff:
1294 if xdiff:
1293 mdiff.bdiff.xdiffblocks(*pair)
1295 mdiff.bdiff.xdiffblocks(*pair)
1294 elif blocks:
1296 elif blocks:
1295 mdiff.bdiff.blocks(*pair)
1297 mdiff.bdiff.blocks(*pair)
1296 else:
1298 else:
1297 mdiff.textdiff(*pair)
1299 mdiff.textdiff(*pair)
1298 else:
1300 else:
1299 q = queue()
1301 q = queue()
1300 for i in _xrange(threads):
1302 for i in _xrange(threads):
1301 q.put(None)
1303 q.put(None)
1302 ready = threading.Condition()
1304 ready = threading.Condition()
1303 done = threading.Event()
1305 done = threading.Event()
1304 for i in _xrange(threads):
1306 for i in _xrange(threads):
1305 threading.Thread(target=_bdiffworker,
1307 threading.Thread(target=_bdiffworker,
1306 args=(q, blocks, xdiff, ready, done)).start()
1308 args=(q, blocks, xdiff, ready, done)).start()
1307 q.join()
1309 q.join()
1308 def d():
1310 def d():
1309 for pair in textpairs:
1311 for pair in textpairs:
1310 q.put(pair)
1312 q.put(pair)
1311 for i in _xrange(threads):
1313 for i in _xrange(threads):
1312 q.put(None)
1314 q.put(None)
1313 with ready:
1315 with ready:
1314 ready.notify_all()
1316 ready.notify_all()
1315 q.join()
1317 q.join()
1316 timer, fm = gettimer(ui, opts)
1318 timer, fm = gettimer(ui, opts)
1317 timer(d)
1319 timer(d)
1318 fm.end()
1320 fm.end()
1319
1321
1320 if withthreads:
1322 if withthreads:
1321 done.set()
1323 done.set()
1322 for i in _xrange(threads):
1324 for i in _xrange(threads):
1323 q.put(None)
1325 q.put(None)
1324 with ready:
1326 with ready:
1325 ready.notify_all()
1327 ready.notify_all()
1326
1328
1327 @command(b'perfunidiff', revlogopts + formatteropts + [
1329 @command(b'perfunidiff', revlogopts + formatteropts + [
1328 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1330 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1329 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1331 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1330 ], b'-c|-m|FILE REV')
1332 ], b'-c|-m|FILE REV')
1331 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1333 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1332 """benchmark a unified diff between revisions
1334 """benchmark a unified diff between revisions
1333
1335
1334 This doesn't include any copy tracing - it's just a unified diff
1336 This doesn't include any copy tracing - it's just a unified diff
1335 of the texts.
1337 of the texts.
1336
1338
1337 By default, benchmark a diff between its delta parent and itself.
1339 By default, benchmark a diff between its delta parent and itself.
1338
1340
1339 With ``--count``, benchmark diffs between delta parents and self for N
1341 With ``--count``, benchmark diffs between delta parents and self for N
1340 revisions starting at the specified revision.
1342 revisions starting at the specified revision.
1341
1343
1342 With ``--alldata``, assume the requested revision is a changeset and
1344 With ``--alldata``, assume the requested revision is a changeset and
1343 measure diffs for all changes related to that changeset (manifest
1345 measure diffs for all changes related to that changeset (manifest
1344 and filelogs).
1346 and filelogs).
1345 """
1347 """
1346 opts = _byteskwargs(opts)
1348 opts = _byteskwargs(opts)
1347 if opts[b'alldata']:
1349 if opts[b'alldata']:
1348 opts[b'changelog'] = True
1350 opts[b'changelog'] = True
1349
1351
1350 if opts.get(b'changelog') or opts.get(b'manifest'):
1352 if opts.get(b'changelog') or opts.get(b'manifest'):
1351 file_, rev = None, file_
1353 file_, rev = None, file_
1352 elif rev is None:
1354 elif rev is None:
1353 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1355 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1354
1356
1355 textpairs = []
1357 textpairs = []
1356
1358
1357 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1359 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1358
1360
1359 startrev = r.rev(r.lookup(rev))
1361 startrev = r.rev(r.lookup(rev))
1360 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1362 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1361 if opts[b'alldata']:
1363 if opts[b'alldata']:
1362 # Load revisions associated with changeset.
1364 # Load revisions associated with changeset.
1363 ctx = repo[rev]
1365 ctx = repo[rev]
1364 mtext = _manifestrevision(repo, ctx.manifestnode())
1366 mtext = _manifestrevision(repo, ctx.manifestnode())
1365 for pctx in ctx.parents():
1367 for pctx in ctx.parents():
1366 pman = _manifestrevision(repo, pctx.manifestnode())
1368 pman = _manifestrevision(repo, pctx.manifestnode())
1367 textpairs.append((pman, mtext))
1369 textpairs.append((pman, mtext))
1368
1370
1369 # Load filelog revisions by iterating manifest delta.
1371 # Load filelog revisions by iterating manifest delta.
1370 man = ctx.manifest()
1372 man = ctx.manifest()
1371 pman = ctx.p1().manifest()
1373 pman = ctx.p1().manifest()
1372 for filename, change in pman.diff(man).items():
1374 for filename, change in pman.diff(man).items():
1373 fctx = repo.file(filename)
1375 fctx = repo.file(filename)
1374 f1 = fctx.revision(change[0][0] or -1)
1376 f1 = fctx.revision(change[0][0] or -1)
1375 f2 = fctx.revision(change[1][0] or -1)
1377 f2 = fctx.revision(change[1][0] or -1)
1376 textpairs.append((f1, f2))
1378 textpairs.append((f1, f2))
1377 else:
1379 else:
1378 dp = r.deltaparent(rev)
1380 dp = r.deltaparent(rev)
1379 textpairs.append((r.revision(dp), r.revision(rev)))
1381 textpairs.append((r.revision(dp), r.revision(rev)))
1380
1382
1381 def d():
1383 def d():
1382 for left, right in textpairs:
1384 for left, right in textpairs:
1383 # The date strings don't matter, so we pass empty strings.
1385 # The date strings don't matter, so we pass empty strings.
1384 headerlines, hunks = mdiff.unidiff(
1386 headerlines, hunks = mdiff.unidiff(
1385 left, b'', right, b'', b'left', b'right', binary=False)
1387 left, b'', right, b'', b'left', b'right', binary=False)
1386 # consume iterators in roughly the way patch.py does
1388 # consume iterators in roughly the way patch.py does
1387 b'\n'.join(headerlines)
1389 b'\n'.join(headerlines)
1388 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1390 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1389 timer, fm = gettimer(ui, opts)
1391 timer, fm = gettimer(ui, opts)
1390 timer(d)
1392 timer(d)
1391 fm.end()
1393 fm.end()
1392
1394
1393 @command(b'perfdiffwd', formatteropts)
1395 @command(b'perfdiffwd', formatteropts)
1394 def perfdiffwd(ui, repo, **opts):
1396 def perfdiffwd(ui, repo, **opts):
1395 """Profile diff of working directory changes"""
1397 """Profile diff of working directory changes"""
1396 opts = _byteskwargs(opts)
1398 opts = _byteskwargs(opts)
1397 timer, fm = gettimer(ui, opts)
1399 timer, fm = gettimer(ui, opts)
1398 options = {
1400 options = {
1399 'w': 'ignore_all_space',
1401 'w': 'ignore_all_space',
1400 'b': 'ignore_space_change',
1402 'b': 'ignore_space_change',
1401 'B': 'ignore_blank_lines',
1403 'B': 'ignore_blank_lines',
1402 }
1404 }
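# Each iteration below times `hg diff` of the working directory with one
# combination of the whitespace-related options above (none, -w, -b, -B, -wB).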
1403
1405
1404 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1406 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1405 opts = dict((options[c], b'1') for c in diffopt)
1407 opts = dict((options[c], b'1') for c in diffopt)
1406 def d():
1408 def d():
1407 ui.pushbuffer()
1409 ui.pushbuffer()
1408 commands.diff(ui, repo, **opts)
1410 commands.diff(ui, repo, **opts)
1409 ui.popbuffer()
1411 ui.popbuffer()
1410 diffopt = diffopt.encode('ascii')
1412 diffopt = diffopt.encode('ascii')
1411 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1413 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1412 timer(d, title=title)
1414 timer(d, title=title)
1413 fm.end()
1415 fm.end()
1414
1416
1415 @command(b'perfrevlogindex', revlogopts + formatteropts,
1417 @command(b'perfrevlogindex', revlogopts + formatteropts,
1416 b'-c|-m|FILE')
1418 b'-c|-m|FILE')
1417 def perfrevlogindex(ui, repo, file_=None, **opts):
1419 def perfrevlogindex(ui, repo, file_=None, **opts):
1418 """Benchmark operations against a revlog index.
1420 """Benchmark operations against a revlog index.
1419
1421
1420 This tests constructing a revlog instance, reading index data,
1422 This tests constructing a revlog instance, reading index data,
1421 parsing index data, and performing various operations related to
1423 parsing index data, and performing various operations related to
1422 index data.
1424 index data.
1423 """
1425 """
1424
1426
1425 opts = _byteskwargs(opts)
1427 opts = _byteskwargs(opts)
1426
1428
1427 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1429 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1428
1430
1429 opener = getattr(rl, 'opener') # trick linter
1431 opener = getattr(rl, 'opener') # trick linter
1430 indexfile = rl.indexfile
1432 indexfile = rl.indexfile
1431 data = opener.read(indexfile)
1433 data = opener.read(indexfile)
1432
1434
1433 header = struct.unpack(b'>I', data[0:4])[0]
1435 header = struct.unpack(b'>I', data[0:4])[0]
1434 version = header & 0xFFFF
1436 version = header & 0xFFFF
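# In a version-1 revlog the first four index bytes pack the format flags in the
# high 16 bits (including the inline-data flag tested below) and the revlog
# version number in the low 16 bits.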
1435 if version == 1:
1437 if version == 1:
1436 revlogio = revlog.revlogio()
1438 revlogio = revlog.revlogio()
1437 inline = header & (1 << 16)
1439 inline = header & (1 << 16)
1438 else:
1440 else:
1439 raise error.Abort((b'unsupported revlog version: %d') % version)
1441 raise error.Abort((b'unsupported revlog version: %d') % version)
1440
1442
1441 rllen = len(rl)
1443 rllen = len(rl)
1442
1444
1443 node0 = rl.node(0)
1445 node0 = rl.node(0)
1444 node25 = rl.node(rllen // 4)
1446 node25 = rl.node(rllen // 4)
1445 node50 = rl.node(rllen // 2)
1447 node50 = rl.node(rllen // 2)
1446 node75 = rl.node(rllen // 4 * 3)
1448 node75 = rl.node(rllen // 4 * 3)
1447 node100 = rl.node(rllen - 1)
1449 node100 = rl.node(rllen - 1)
1448
1450
1449 allrevs = range(rllen)
1451 allrevs = range(rllen)
1450 allrevsrev = list(reversed(allrevs))
1452 allrevsrev = list(reversed(allrevs))
1451 allnodes = [rl.node(rev) for rev in range(rllen)]
1453 allnodes = [rl.node(rev) for rev in range(rllen)]
1452 allnodesrev = list(reversed(allnodes))
1454 allnodesrev = list(reversed(allnodes))
1453
1455
1454 def constructor():
1456 def constructor():
1455 revlog.revlog(opener, indexfile)
1457 revlog.revlog(opener, indexfile)
1456
1458
1457 def read():
1459 def read():
1458 with opener(indexfile) as fh:
1460 with opener(indexfile) as fh:
1459 fh.read()
1461 fh.read()
1460
1462
1461 def parseindex():
1463 def parseindex():
1462 revlogio.parseindex(data, inline)
1464 revlogio.parseindex(data, inline)
1463
1465
1464 def getentry(revornode):
1466 def getentry(revornode):
1465 index = revlogio.parseindex(data, inline)[0]
1467 index = revlogio.parseindex(data, inline)[0]
1466 index[revornode]
1468 index[revornode]
1467
1469
1468 def getentries(revs, count=1):
1470 def getentries(revs, count=1):
1469 index = revlogio.parseindex(data, inline)[0]
1471 index = revlogio.parseindex(data, inline)[0]
1470
1472
1471 for i in range(count):
1473 for i in range(count):
1472 for rev in revs:
1474 for rev in revs:
1473 index[rev]
1475 index[rev]
1474
1476
1475 def resolvenode(node):
1477 def resolvenode(node):
1476 nodemap = revlogio.parseindex(data, inline)[1]
1478 nodemap = revlogio.parseindex(data, inline)[1]
1477 # This only works for the C code.
1479 # This only works for the C code.
1478 if nodemap is None:
1480 if nodemap is None:
1479 return
1481 return
1480
1482
1481 try:
1483 try:
1482 nodemap[node]
1484 nodemap[node]
1483 except error.RevlogError:
1485 except error.RevlogError:
1484 pass
1486 pass
1485
1487
1486 def resolvenodes(nodes, count=1):
1488 def resolvenodes(nodes, count=1):
1487 nodemap = revlogio.parseindex(data, inline)[1]
1489 nodemap = revlogio.parseindex(data, inline)[1]
1488 if nodemap is None:
1490 if nodemap is None:
1489 return
1491 return
1490
1492
1491 for i in range(count):
1493 for i in range(count):
1492 for node in nodes:
1494 for node in nodes:
1493 try:
1495 try:
1494 nodemap[node]
1496 nodemap[node]
1495 except error.RevlogError:
1497 except error.RevlogError:
1496 pass
1498 pass
1497
1499
1498 benches = [
1500 benches = [
1499 (constructor, b'revlog constructor'),
1501 (constructor, b'revlog constructor'),
1500 (read, b'read'),
1502 (read, b'read'),
1501 (parseindex, b'create index object'),
1503 (parseindex, b'create index object'),
1502 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1504 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1503 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1505 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1504 (lambda: resolvenode(node0), b'look up node at rev 0'),
1506 (lambda: resolvenode(node0), b'look up node at rev 0'),
1505 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1507 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1506 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1508 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1507 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1509 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1508 (lambda: resolvenode(node100), b'look up node at tip'),
1510 (lambda: resolvenode(node100), b'look up node at tip'),
1509 # 2x variation is to measure caching impact.
1511 # 2x variation is to measure caching impact.
1510 (lambda: resolvenodes(allnodes),
1512 (lambda: resolvenodes(allnodes),
1511 b'look up all nodes (forward)'),
1513 b'look up all nodes (forward)'),
1512 (lambda: resolvenodes(allnodes, 2),
1514 (lambda: resolvenodes(allnodes, 2),
1513 b'look up all nodes 2x (forward)'),
1515 b'look up all nodes 2x (forward)'),
1514 (lambda: resolvenodes(allnodesrev),
1516 (lambda: resolvenodes(allnodesrev),
1515 b'look up all nodes (reverse)'),
1517 b'look up all nodes (reverse)'),
1516 (lambda: resolvenodes(allnodesrev, 2),
1518 (lambda: resolvenodes(allnodesrev, 2),
1517 b'look up all nodes 2x (reverse)'),
1519 b'look up all nodes 2x (reverse)'),
1518 (lambda: getentries(allrevs),
1520 (lambda: getentries(allrevs),
1519 b'retrieve all index entries (forward)'),
1521 b'retrieve all index entries (forward)'),
1520 (lambda: getentries(allrevs, 2),
1522 (lambda: getentries(allrevs, 2),
1521 b'retrieve all index entries 2x (forward)'),
1523 b'retrieve all index entries 2x (forward)'),
1522 (lambda: getentries(allrevsrev),
1524 (lambda: getentries(allrevsrev),
1523 b'retrieve all index entries (reverse)'),
1525 b'retrieve all index entries (reverse)'),
1524 (lambda: getentries(allrevsrev, 2),
1526 (lambda: getentries(allrevsrev, 2),
1525 b'retrieve all index entries 2x (reverse)'),
1527 b'retrieve all index entries 2x (reverse)'),
1526 ]
1528 ]
1527
1529
1528 for fn, title in benches:
1530 for fn, title in benches:
1529 timer, fm = gettimer(ui, opts)
1531 timer, fm = gettimer(ui, opts)
1530 timer(fn, title=title)
1532 timer(fn, title=title)
1531 fm.end()
1533 fm.end()
1532
1534
1533 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1535 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1534 [(b'd', b'dist', 100, b'distance between the revisions'),
1536 [(b'd', b'dist', 100, b'distance between the revisions'),
1535 (b's', b'startrev', 0, b'revision to start reading at'),
1537 (b's', b'startrev', 0, b'revision to start reading at'),
1536 (b'', b'reverse', False, b'read in reverse')],
1538 (b'', b'reverse', False, b'read in reverse')],
1537 b'-c|-m|FILE')
1539 b'-c|-m|FILE')
1538 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1540 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1539 **opts):
1541 **opts):
1540 """Benchmark reading a series of revisions from a revlog.
1542 """Benchmark reading a series of revisions from a revlog.
1541
1543
1542 By default, we read every ``-d/--dist`` revision from 0 to tip of
1544 By default, we read every ``-d/--dist`` revision from 0 to tip of
1543 the specified revlog.
1545 the specified revlog.
1544
1546
1545 The start revision can be defined via ``-s/--startrev``.
1547 The start revision can be defined via ``-s/--startrev``.
1546 """
1548 """
1547 opts = _byteskwargs(opts)
1549 opts = _byteskwargs(opts)
1548
1550
1549 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1551 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1550 rllen = getlen(ui)(rl)
1552 rllen = getlen(ui)(rl)
1551
1553
1552 if startrev < 0:
1554 if startrev < 0:
1553 startrev = rllen + startrev
1555 startrev = rllen + startrev
1554
1556
1555 def d():
1557 def d():
1556 rl.clearcaches()
1558 rl.clearcaches()
1557
1559
1558 beginrev = startrev
1560 beginrev = startrev
1559 endrev = rllen
1561 endrev = rllen
1560 dist = opts[b'dist']
1562 dist = opts[b'dist']
1561
1563
1562 if reverse:
1564 if reverse:
1563 beginrev, endrev = endrev - 1, beginrev - 1
1565 beginrev, endrev = endrev - 1, beginrev - 1
1564 dist = -1 * dist
1566 dist = -1 * dist
1565
1567
1566 for x in _xrange(beginrev, endrev, dist):
1568 for x in _xrange(beginrev, endrev, dist):
1567 # Old revisions don't support passing int.
1569 # Old revisions don't support passing int.
1568 n = rl.node(x)
1570 n = rl.node(x)
1569 rl.revision(n)
1571 rl.revision(n)
1570
1572
1571 timer, fm = gettimer(ui, opts)
1573 timer, fm = gettimer(ui, opts)
1572 timer(d)
1574 timer(d)
1573 fm.end()
1575 fm.end()
1574
1576
1575 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1577 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1576 [(b's', b'startrev', 1000, b'revision to start writing at'),
1578 [(b's', b'startrev', 1000, b'revision to start writing at'),
1577 (b'', b'stoprev', -1, b'last revision to write'),
1579 (b'', b'stoprev', -1, b'last revision to write'),
1578 (b'', b'count', 3, b'number of passes to perform'),
1580 (b'', b'count', 3, b'number of passes to perform'),
1579 (b'', b'details', False, b'print timing for every revision tested'),
1581 (b'', b'details', False, b'print timing for every revision tested'),
1580 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1582 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1581 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1583 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1582 ],
1584 ],
1583 b'-c|-m|FILE')
1585 b'-c|-m|FILE')
1584 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1586 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1585 """Benchmark writing a series of revisions to a revlog.
1587 """Benchmark writing a series of revisions to a revlog.
1586
1588
1587 Possible source values are:
1589 Possible source values are:
1588 * `full`: add from a full text (default).
1590 * `full`: add from a full text (default).
1589 * `parent-1`: add from a delta to the first parent
1591 * `parent-1`: add from a delta to the first parent
1590 * `parent-2`: add from a delta to the second parent if it exists
1592 * `parent-2`: add from a delta to the second parent if it exists
1591 (use a delta from the first parent otherwise)
1593 (use a delta from the first parent otherwise)
1592 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1594 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1593 * `storage`: add from the existing precomputed deltas
1595 * `storage`: add from the existing precomputed deltas
1594 """
1596 """
1595 opts = _byteskwargs(opts)
1597 opts = _byteskwargs(opts)
1596
1598
1597 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1599 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1598 rllen = getlen(ui)(rl)
1600 rllen = getlen(ui)(rl)
1599 if startrev < 0:
1601 if startrev < 0:
1600 startrev = rllen + startrev
1602 startrev = rllen + startrev
1601 if stoprev < 0:
1603 if stoprev < 0:
1602 stoprev = rllen + stoprev
1604 stoprev = rllen + stoprev
1603
1605
1604 lazydeltabase = opts['lazydeltabase']
1606 lazydeltabase = opts['lazydeltabase']
1605 source = opts['source']
1607 source = opts['source']
1606 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1608 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1607 b'storage')
1609 b'storage')
1608 if source not in validsource:
1610 if source not in validsource:
1609 raise error.Abort('invalid source type: %s' % source)
1611 raise error.Abort('invalid source type: %s' % source)
1610
1612
1611 ### actually gather results
1613 ### actually gather results
1612 count = opts['count']
1614 count = opts['count']
1613 if count <= 0:
1615 if count <= 0:
1614 raise error.Abort('invalid run count: %d' % count)
1616 raise error.Abort('invalid run count: %d' % count)
1615 allresults = []
1617 allresults = []
1616 for c in range(count):
1618 for c in range(count):
1617 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1619 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1618 lazydeltabase=lazydeltabase)
1620 lazydeltabase=lazydeltabase)
1619 allresults.append(timing)
1621 allresults.append(timing)
1620
1622
1621 ### consolidate the results in a single list
1623 ### consolidate the results in a single list
1622 results = []
1624 results = []
1623 for idx, (rev, t) in enumerate(allresults[0]):
1625 for idx, (rev, t) in enumerate(allresults[0]):
1624 ts = [t]
1626 ts = [t]
1625 for other in allresults[1:]:
1627 for other in allresults[1:]:
1626 orev, ot = other[idx]
1628 orev, ot = other[idx]
1627 assert orev == rev
1629 assert orev == rev
1628 ts.append(ot)
1630 ts.append(ot)
1629 results.append((rev, ts))
1631 results.append((rev, ts))
1630 resultcount = len(results)
1632 resultcount = len(results)
1631
1633
1632 ### Compute and display relevant statistics
1634 ### Compute and display relevant statistics
1633
1635
1634 # get a formatter
1636 # get a formatter
1635 fm = ui.formatter(b'perf', opts)
1637 fm = ui.formatter(b'perf', opts)
1636 displayall = ui.configbool(b"perf", b"all-timing", False)
1638 displayall = ui.configbool(b"perf", b"all-timing", False)
1637
1639
1638 # print individual details if requested
1640 # print individual details if requested
1639 if opts['details']:
1641 if opts['details']:
1640 for idx, item in enumerate(results, 1):
1642 for idx, item in enumerate(results, 1):
1641 rev, data = item
1643 rev, data = item
1642 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1644 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1643 formatone(fm, data, title=title, displayall=displayall)
1645 formatone(fm, data, title=title, displayall=displayall)
1644
1646
1645 # sorts results by median time
1647 # sorts results by median time
1646 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1648 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1647 # list of (name, index) to display
1649 # list of (name, index) to display
1648 relevants = [
1650 relevants = [
1649 ("min", 0),
1651 ("min", 0),
1650 ("10%", resultcount * 10 // 100),
1652 ("10%", resultcount * 10 // 100),
1651 ("25%", resultcount * 25 // 100),
1653 ("25%", resultcount * 25 // 100),
1652 ("50%", resultcount * 70 // 100),
1654 ("50%", resultcount * 70 // 100),
1653 ("75%", resultcount * 75 // 100),
1655 ("75%", resultcount * 75 // 100),
1654 ("90%", resultcount * 90 // 100),
1656 ("90%", resultcount * 90 // 100),
1655 ("95%", resultcount * 95 // 100),
1657 ("95%", resultcount * 95 // 100),
1656 ("99%", resultcount * 99 // 100),
1658 ("99%", resultcount * 99 // 100),
1657 ("max", -1),
1659 ("max", -1),
1658 ]
1660 ]
1659 if not ui.quiet:
1661 if not ui.quiet:
1660 for name, idx in relevants:
1662 for name, idx in relevants:
1661 data = results[idx]
1663 data = results[idx]
1662 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1664 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1663 formatone(fm, data[1], title=title, displayall=displayall)
1665 formatone(fm, data[1], title=title, displayall=displayall)
1664
1666
1665 # XXX summing that many floats will not be very precise; we ignore this fact
1667 # XXX summing that many floats will not be very precise; we ignore this fact
1666 # for now
1668 # for now
1667 totaltime = []
1669 totaltime = []
1668 for item in allresults:
1670 for item in allresults:
1669 totaltime.append((sum(x[1][0] for x in item),
1671 totaltime.append((sum(x[1][0] for x in item),
1670 sum(x[1][1] for x in item),
1672 sum(x[1][1] for x in item),
1671 sum(x[1][2] for x in item),)
1673 sum(x[1][2] for x in item),)
1672 )
1674 )
1673 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1675 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1674 displayall=displayall)
1676 displayall=displayall)
1675 fm.end()
1677 fm.end()
1676
1678
1677 class _faketr(object):
1679 class _faketr(object):
1678 def add(s, x, y, z=None):
1680 def add(s, x, y, z=None):
1679 return None
1681 return None
1680
1682
1681 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1683 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1682 lazydeltabase=True):
1684 lazydeltabase=True):
1683 timings = []
1685 timings = []
1684 tr = _faketr()
1686 tr = _faketr()
1685 with _temprevlog(ui, orig, startrev) as dest:
1687 with _temprevlog(ui, orig, startrev) as dest:
1686 dest._lazydeltabase = lazydeltabase
1688 dest._lazydeltabase = lazydeltabase
1687 revs = list(orig.revs(startrev, stoprev))
1689 revs = list(orig.revs(startrev, stoprev))
1688 total = len(revs)
1690 total = len(revs)
1689 topic = 'adding'
1691 topic = 'adding'
1690 if runidx is not None:
1692 if runidx is not None:
1691 topic += ' (run #%d)' % runidx
1693 topic += ' (run #%d)' % runidx
1692 for idx, rev in enumerate(revs):
1694 for idx, rev in enumerate(revs):
1693 ui.progress(topic, idx, unit='revs', total=total)
1695 ui.progress(topic, idx, unit='revs', total=total)
1694 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1696 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1695 with timeone() as r:
1697 with timeone() as r:
1696 dest.addrawrevision(*addargs, **addkwargs)
1698 dest.addrawrevision(*addargs, **addkwargs)
1697 timings.append((rev, r[0]))
1699 timings.append((rev, r[0]))
1698 ui.progress(topic, total, unit='revs', total=total)
1700 ui.progress(topic, total, unit='revs', total=total)
1699 ui.progress(topic, None, unit='revs', total=total)
1701 ui.progress(topic, None, unit='revs', total=total)
1700 return timings
1702 return timings
1701
1703
1702 def _getrevisionseed(orig, rev, tr, source):
1704 def _getrevisionseed(orig, rev, tr, source):
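# Build the positional and keyword arguments for addrawrevision(): a full text
# for `full`, otherwise a (baserev, delta) cachedelta computed against the
# parent or storage delta base selected by `source`.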
1703 from mercurial.node import nullid
1705 from mercurial.node import nullid
1704
1706
1705 linkrev = orig.linkrev(rev)
1707 linkrev = orig.linkrev(rev)
1706 node = orig.node(rev)
1708 node = orig.node(rev)
1707 p1, p2 = orig.parents(node)
1709 p1, p2 = orig.parents(node)
1708 flags = orig.flags(rev)
1710 flags = orig.flags(rev)
1709 cachedelta = None
1711 cachedelta = None
1710 text = None
1712 text = None
1711
1713
1712 if source == b'full':
1714 if source == b'full':
1713 text = orig.revision(rev)
1715 text = orig.revision(rev)
1714 elif source == b'parent-1':
1716 elif source == b'parent-1':
1715 baserev = orig.rev(p1)
1717 baserev = orig.rev(p1)
1716 cachedelta = (baserev, orig.revdiff(p1, rev))
1718 cachedelta = (baserev, orig.revdiff(p1, rev))
1717 elif source == b'parent-2':
1719 elif source == b'parent-2':
1718 parent = p2
1720 parent = p2
1719 if p2 == nullid:
1721 if p2 == nullid:
1720 parent = p1
1722 parent = p1
1721 baserev = orig.rev(parent)
1723 baserev = orig.rev(parent)
1722 cachedelta = (baserev, orig.revdiff(parent, rev))
1724 cachedelta = (baserev, orig.revdiff(parent, rev))
1723 elif source == b'parent-smallest':
1725 elif source == b'parent-smallest':
1724 p1diff = orig.revdiff(p1, rev)
1726 p1diff = orig.revdiff(p1, rev)
1725 parent = p1
1727 parent = p1
1726 diff = p1diff
1728 diff = p1diff
1727 if p2 != nullid:
1729 if p2 != nullid:
1728 p2diff = orig.revdiff(p2, rev)
1730 p2diff = orig.revdiff(p2, rev)
1729 if len(p1diff) > len(p2diff):
1731 if len(p1diff) > len(p2diff):
1730 parent = p2
1732 parent = p2
1731 diff = p2diff
1733 diff = p2diff
1732 baserev = orig.rev(parent)
1734 baserev = orig.rev(parent)
1733 cachedelta = (baserev, diff)
1735 cachedelta = (baserev, diff)
1734 elif source == b'storage':
1736 elif source == b'storage':
1735 baserev = orig.deltaparent(rev)
1737 baserev = orig.deltaparent(rev)
1736 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1738 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1737
1739
1738 return ((text, tr, linkrev, p1, p2),
1740 return ((text, tr, linkrev, p1, p2),
1739 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1741 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1740
1742
1741 @contextlib.contextmanager
1743 @contextlib.contextmanager
1742 def _temprevlog(ui, orig, truncaterev):
1744 def _temprevlog(ui, orig, truncaterev):
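# Copy the original revlog's index and data files into a temporary directory,
# truncate both back to `truncaterev`, and yield a writable revlog so the
# caller can re-add the removed revisions without touching the real store.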
1743 from mercurial import vfs as vfsmod
1745 from mercurial import vfs as vfsmod
1744
1746
1745 if orig._inline:
1747 if orig._inline:
1746 raise error.Abort('not supporting inline revlog (yet)')
1748 raise error.Abort('not supporting inline revlog (yet)')
1747
1749
1748 origindexpath = orig.opener.join(orig.indexfile)
1750 origindexpath = orig.opener.join(orig.indexfile)
1749 origdatapath = orig.opener.join(orig.datafile)
1751 origdatapath = orig.opener.join(orig.datafile)
1750 indexname = 'revlog.i'
1752 indexname = 'revlog.i'
1751 dataname = 'revlog.d'
1753 dataname = 'revlog.d'
1752
1754
1753 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1755 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1754 try:
1756 try:
1755 # copy the data file in a temporary directory
1757 # copy the data file in a temporary directory
1756 ui.debug('copying data in %s\n' % tmpdir)
1758 ui.debug('copying data in %s\n' % tmpdir)
1757 destindexpath = os.path.join(tmpdir, 'revlog.i')
1759 destindexpath = os.path.join(tmpdir, 'revlog.i')
1758 destdatapath = os.path.join(tmpdir, 'revlog.d')
1760 destdatapath = os.path.join(tmpdir, 'revlog.d')
1759 shutil.copyfile(origindexpath, destindexpath)
1761 shutil.copyfile(origindexpath, destindexpath)
1760 shutil.copyfile(origdatapath, destdatapath)
1762 shutil.copyfile(origdatapath, destdatapath)
1761
1763
1762 # remove the data we want to add again
1764 # remove the data we want to add again
1763 ui.debug('truncating data to be rewritten\n')
1765 ui.debug('truncating data to be rewritten\n')
1764 with open(destindexpath, 'ab') as index:
1766 with open(destindexpath, 'ab') as index:
1765 index.seek(0)
1767 index.seek(0)
1766 index.truncate(truncaterev * orig._io.size)
1768 index.truncate(truncaterev * orig._io.size)
1767 with open(destdatapath, 'ab') as data:
1769 with open(destdatapath, 'ab') as data:
1768 data.seek(0)
1770 data.seek(0)
1769 data.truncate(orig.start(truncaterev))
1771 data.truncate(orig.start(truncaterev))
1770
1772
1771 # instantiate a new revlog from the temporary copy
1773 # instantiate a new revlog from the temporary copy
1772 ui.debug('instantiating revlog from the truncated copy\n')
1774 ui.debug('instantiating revlog from the truncated copy\n')
1773 vfs = vfsmod.vfs(tmpdir)
1775 vfs = vfsmod.vfs(tmpdir)
1774 vfs.options = getattr(orig.opener, 'options', None)
1776 vfs.options = getattr(orig.opener, 'options', None)
1775
1777
1776 dest = revlog.revlog(vfs,
1778 dest = revlog.revlog(vfs,
1777 indexfile=indexname,
1779 indexfile=indexname,
1778 datafile=dataname)
1780 datafile=dataname)
1779 if dest._inline:
1781 if dest._inline:
1780 raise error.Abort('not supporting inline revlog (yet)')
1782 raise error.Abort('not supporting inline revlog (yet)')
1781 # make sure internals are initialized
1783 # make sure internals are initialized
1782 dest.revision(len(dest) - 1)
1784 dest.revision(len(dest) - 1)
1783 yield dest
1785 yield dest
1784 del dest, vfs
1786 del dest, vfs
1785 finally:
1787 finally:
1786 shutil.rmtree(tmpdir, True)
1788 shutil.rmtree(tmpdir, True)
1787
1789
1788 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1790 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1789 [(b'e', b'engines', b'', b'compression engines to use'),
1791 [(b'e', b'engines', b'', b'compression engines to use'),
1790 (b's', b'startrev', 0, b'revision to start at')],
1792 (b's', b'startrev', 0, b'revision to start at')],
1791 b'-c|-m|FILE')
1793 b'-c|-m|FILE')
1792 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1794 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1793 """Benchmark operations on revlog chunks.
1795 """Benchmark operations on revlog chunks.
1794
1796
1795 Logically, each revlog is a collection of fulltext revisions. However,
1797 Logically, each revlog is a collection of fulltext revisions. However,
1796 stored within each revlog are "chunks" of possibly compressed data. This
1798 stored within each revlog are "chunks" of possibly compressed data. This
1797 data needs to be read and decompressed or compressed and written.
1799 data needs to be read and decompressed or compressed and written.
1798
1800
1799 This command measures the time it takes to read+decompress and recompress
1801 This command measures the time it takes to read+decompress and recompress
1800 chunks in a revlog. It effectively isolates I/O and compression performance.
1802 chunks in a revlog. It effectively isolates I/O and compression performance.
1801 For measurements of higher-level operations like resolving revisions,
1803 For measurements of higher-level operations like resolving revisions,
1802 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1804 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1803 """
1805 """
1804 opts = _byteskwargs(opts)
1806 opts = _byteskwargs(opts)
1805
1807
1806 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1808 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1807
1809
1808 # _chunkraw was renamed to _getsegmentforrevs.
1810 # _chunkraw was renamed to _getsegmentforrevs.
1809 try:
1811 try:
1810 segmentforrevs = rl._getsegmentforrevs
1812 segmentforrevs = rl._getsegmentforrevs
1811 except AttributeError:
1813 except AttributeError:
1812 segmentforrevs = rl._chunkraw
1814 segmentforrevs = rl._chunkraw
1813
1815
1814 # Verify engines argument.
1816 # Verify engines argument.
1815 if engines:
1817 if engines:
1816 engines = set(e.strip() for e in engines.split(b','))
1818 engines = set(e.strip() for e in engines.split(b','))
1817 for engine in engines:
1819 for engine in engines:
1818 try:
1820 try:
1819 util.compressionengines[engine]
1821 util.compressionengines[engine]
1820 except KeyError:
1822 except KeyError:
1821 raise error.Abort(b'unknown compression engine: %s' % engine)
1823 raise error.Abort(b'unknown compression engine: %s' % engine)
1822 else:
1824 else:
1823 engines = []
1825 engines = []
1824 for e in util.compengines:
1826 for e in util.compengines:
1825 engine = util.compengines[e]
1827 engine = util.compengines[e]
1826 try:
1828 try:
1827 if engine.available():
1829 if engine.available():
1828 engine.revlogcompressor().compress(b'dummy')
1830 engine.revlogcompressor().compress(b'dummy')
1829 engines.append(e)
1831 engines.append(e)
1830 except NotImplementedError:
1832 except NotImplementedError:
1831 pass
1833 pass
1832
1834
1833 revs = list(rl.revs(startrev, len(rl) - 1))
1835 revs = list(rl.revs(startrev, len(rl) - 1))
1834
1836
1835 def rlfh(rl):
1837 def rlfh(rl):
1836 if rl._inline:
1838 if rl._inline:
1837 return getsvfs(repo)(rl.indexfile)
1839 return getsvfs(repo)(rl.indexfile)
1838 else:
1840 else:
1839 return getsvfs(repo)(rl.datafile)
1841 return getsvfs(repo)(rl.datafile)
1840
1842
1841 def doread():
1843 def doread():
1842 rl.clearcaches()
1844 rl.clearcaches()
1843 for rev in revs:
1845 for rev in revs:
1844 segmentforrevs(rev, rev)
1846 segmentforrevs(rev, rev)
1845
1847
1846 def doreadcachedfh():
1848 def doreadcachedfh():
1847 rl.clearcaches()
1849 rl.clearcaches()
1848 fh = rlfh(rl)
1850 fh = rlfh(rl)
1849 for rev in revs:
1851 for rev in revs:
1850 segmentforrevs(rev, rev, df=fh)
1852 segmentforrevs(rev, rev, df=fh)
1851
1853
1852 def doreadbatch():
1854 def doreadbatch():
1853 rl.clearcaches()
1855 rl.clearcaches()
1854 segmentforrevs(revs[0], revs[-1])
1856 segmentforrevs(revs[0], revs[-1])
1855
1857
1856 def doreadbatchcachedfh():
1858 def doreadbatchcachedfh():
1857 rl.clearcaches()
1859 rl.clearcaches()
1858 fh = rlfh(rl)
1860 fh = rlfh(rl)
1859 segmentforrevs(revs[0], revs[-1], df=fh)
1861 segmentforrevs(revs[0], revs[-1], df=fh)
1860
1862
1861 def dochunk():
1863 def dochunk():
1862 rl.clearcaches()
1864 rl.clearcaches()
1863 fh = rlfh(rl)
1865 fh = rlfh(rl)
1864 for rev in revs:
1866 for rev in revs:
1865 rl._chunk(rev, df=fh)
1867 rl._chunk(rev, df=fh)
1866
1868
1867 chunks = [None]
1869 chunks = [None]
1868
1870
1869 def dochunkbatch():
1871 def dochunkbatch():
1870 rl.clearcaches()
1872 rl.clearcaches()
1871 fh = rlfh(rl)
1873 fh = rlfh(rl)
1872 # Save chunks as a side-effect.
1874 # Save chunks as a side-effect.
1873 chunks[0] = rl._chunks(revs, df=fh)
1875 chunks[0] = rl._chunks(revs, df=fh)
1874
1876
1875 def docompress(compressor):
1877 def docompress(compressor):
1876 rl.clearcaches()
1878 rl.clearcaches()
1877
1879
1878 try:
1880 try:
1879 # Swap in the requested compression engine.
1881 # Swap in the requested compression engine.
1880 oldcompressor = rl._compressor
1882 oldcompressor = rl._compressor
1881 rl._compressor = compressor
1883 rl._compressor = compressor
1882 for chunk in chunks[0]:
1884 for chunk in chunks[0]:
1883 rl.compress(chunk)
1885 rl.compress(chunk)
1884 finally:
1886 finally:
1885 rl._compressor = oldcompressor
1887 rl._compressor = oldcompressor
1886
1888
1887 benches = [
1889 benches = [
1888 (lambda: doread(), b'read'),
1890 (lambda: doread(), b'read'),
1889 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1891 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1890 (lambda: doreadbatch(), b'read batch'),
1892 (lambda: doreadbatch(), b'read batch'),
1891 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1893 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1892 (lambda: dochunk(), b'chunk'),
1894 (lambda: dochunk(), b'chunk'),
1893 (lambda: dochunkbatch(), b'chunk batch'),
1895 (lambda: dochunkbatch(), b'chunk batch'),
1894 ]
1896 ]
1895
1897
1896 for engine in sorted(engines):
1898 for engine in sorted(engines):
1897 compressor = util.compengines[engine].revlogcompressor()
1899 compressor = util.compengines[engine].revlogcompressor()
1898 benches.append((functools.partial(docompress, compressor),
1900 benches.append((functools.partial(docompress, compressor),
1899 b'compress w/ %s' % engine))
1901 b'compress w/ %s' % engine))
1900
1902
1901 for fn, title in benches:
1903 for fn, title in benches:
1902 timer, fm = gettimer(ui, opts)
1904 timer, fm = gettimer(ui, opts)
1903 timer(fn, title=title)
1905 timer(fn, title=title)
1904 fm.end()
1906 fm.end()
1905
1907
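# A hedged, illustrative sketch (not part of the original file) of the
# compression path that the 'compress w/ <engine>' benchmarks above exercise.
# It assumes 'rl' is an already-open revlog and that the zlib engine is
# available on this build; the chunk bytes are a placeholder.
#
#     compressor = util.compengines[b'zlib'].revlogcompressor()
#     rl._compressor = compressor            # swap in the engine under test
#     rl.compress(b'some chunk of revision data')
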
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()

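# An illustrative sketch (not part of the original file) of the phases
# benchmarked above, chained end to end for a single revision. It assumes
# 'r' is an open revlog and 'rev'/'node' name an existing revision in it.
#
#     chain = r._deltachain(rev)[0]                    # 1. delta chain
#     bins = r._chunks(chain)                          # 3-4. read + decompress
#     text = mdiff.patches(bytes(bins[0]), bins[1:])   # 5. apply the deltas
#     r.checkhash(text, node, rev=rev)                 # 6. verify the hash
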
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set caches on revset execution. The volatile caches hold
    filtering- and obsolescence-related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            for ctx in repo.set(expr): pass
        else:
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()

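# An illustrative sketch (not part of the original file) of the two iteration
# modes timed above; b'all()' is just an example revset expression.
#
#     for r in repo.revs(b'all()'):      # plain revision numbers (default)
#         pass
#     for ctx in repo.set(b'all()'):     # full changectx objects (--contexts)
#         pass
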
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
         ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets compute elements related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()

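# A hedged sketch (not part of the original file) of one timed unit of work
# from the benchmark above. It assumes b'obsolete' appears in
# obsolete.cachefuncs and b'visible' in repoview.filtertable for this version.
#
#     repo.invalidatevolatilesets()             # drop the volatile caches
#     obsolete.getrevs(repo, b'obsolete')       # recompute one obsolescence set
#     repoview.filterrevs(repo, b'visible')     # recompute one filtered set
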
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
         ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()

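# An illustrative sketch (not part of the original file) of the cache-dropping
# pattern the benchmark above uses for a single repoview filter; b'served' is
# just an example filter name from repoview.filtertable.
#
#     view = repo.filtered(b'served')
#     view._branchcaches.pop(b'served', None)    # drop only this filter's cache
#     view.branchmap()                           # rebuild it
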
@command(b'perfbranchmapload', [
         (b'f', b'filter', b'', b'Specify repoview filter'),
         (b'', b'list', False, b'List branchmap filter caches'),
    ] + formatteropts)
def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if filter:
        repo = repoview.repoview(repo, filter)
    else:
        repo = repo.unfiltered()
    # try once without timer, the filter may not be cached
    if branchmap.read(repo) is None:
        raise error.Abort(b'No branchmap cached for %s repo'
                          % (filter or b'unfiltered'))
    timer, fm = gettimer(ui, opts)
    timer(lambda: branchmap.read(repo) and None)
    fm.end()

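# A minimal sketch (not part of the original file) of how the benchmark above
# guards against a missing on-disk cache before timing reads; the abort
# message is a placeholder.
#
#     if branchmap.read(repo) is None:      # no 'branch2*' cache file on disk
#         raise error.Abort(b'nothing cached to read')
#     branchmap.read(repo)                  # the call actually being timed
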
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    timer(lambda: len(obsolete.obsstore(svfs)))
    fm.end()

@command(b'perflrucachedict', formatteropts +
         [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
          (b'', b'mincost', 0, b'smallest cost of items in cache'),
          (b'', b'maxcost', 100, b'maximum cost of items in cache'),
          (b'', b'size', 4, b'size of cache'),
          (b'', b'gets', 10000, b'number of key lookups'),
          (b'', b'sets', 10000, b'number of key sets'),
          (b'', b'mixed', 10000, b'number of mixed mode operations'),
          (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
         norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

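# An illustrative sketch (not part of the original file) of the lrucachedict
# API exercised by the benchmarks above; the keys, values, size and costs are
# example placeholders.
#
#     d = util.lrucachedict(4, maxcost=100)
#     d.insert(b'a', b'value', cost=10)      # insertion with an explicit cost
#     d[b'b'] = b'value'                     # plain assignment (no cost)
#     try:
#         d[b'a']                            # lookup; KeyError once evicted
#     except KeyError:
#         pass
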
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def write():
        for i in range(100000):
            ui.write((b'Testing write performance\n'))
    timer(write)
    fm.end()

def uisetup(ui):
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
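
# A minimal sketch (not part of the original file) of the generic shape of the
# wrapping pattern used in uisetup() above; 'wrapper' is a hypothetical name.
#
#     def wrapper(orig, *args, **kwargs):
#         # adjust or validate the arguments, then defer to the original
#         return orig(*args, **kwargs)
#     extensions.wrapfunction(cmdutil, b'openrevlog', wrapper)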