perf: extract the timing of a section in a context manager...
Boris Feld
r40179:acf560bc default
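This changeset pulls the per-iteration timing bookkeeping out of _timer() into a new timeone() context manager, so a single section of code can be timed with the same wall/user/sys accounting. Below is a minimal, self-contained sketch of that pattern, simplified from the diff that follows: it uses time.perf_counter() where perf.py uses util.timer, and demo_workload() is a made-up function for illustration only.

    import contextlib
    import os
    import time

    @contextlib.contextmanager
    def timeone():
        # The caller receives a list; one (wall, user, sys) tuple is
        # appended to it when the with-block exits.
        r = []
        ostart = os.times()
        cstart = time.perf_counter()
        yield r
        cstop = time.perf_counter()
        ostop = os.times()
        a, b = ostart, ostop
        r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))

    def demo_workload():
        sum(i * i for i in range(100000))

    with timeone() as item:
        demo_workload()
    wall, user, system = item[0]
    print('wall %f comb %f' % (wall, user + system))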
@@ -1,2107 +1,2116 @@
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance'''
2 '''helper extension to measure performance'''
3
3
4 # "historical portability" policy of perf.py:
4 # "historical portability" policy of perf.py:
5 #
5 #
6 # We have to do:
6 # We have to do:
7 # - make perf.py "loadable" with as wide Mercurial version as possible
7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 # This doesn't mean that perf commands work correctly with that Mercurial.
8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 # - make historical perf command work correctly with as wide Mercurial
10 # - make historical perf command work correctly with as wide Mercurial
11 # version as possible
11 # version as possible
12 #
12 #
13 # We have to do, if possible with reasonable cost:
13 # We have to do, if possible with reasonable cost:
14 # - make recent perf command for historical feature work correctly
14 # - make recent perf command for historical feature work correctly
15 # with early Mercurial
15 # with early Mercurial
16 #
16 #
17 # We don't have to do:
17 # We don't have to do:
18 # - make perf command for recent feature work correctly with early
18 # - make perf command for recent feature work correctly with early
19 # Mercurial
19 # Mercurial
20
20
21 from __future__ import absolute_import
21 from __future__ import absolute_import
22 import contextlib
22 import functools
23 import functools
23 import gc
24 import gc
24 import os
25 import os
25 import random
26 import random
26 import struct
27 import struct
27 import sys
28 import sys
28 import threading
29 import threading
29 import time
30 import time
30 from mercurial import (
31 from mercurial import (
31 changegroup,
32 changegroup,
32 cmdutil,
33 cmdutil,
33 commands,
34 commands,
34 copies,
35 copies,
35 error,
36 error,
36 extensions,
37 extensions,
37 mdiff,
38 mdiff,
38 merge,
39 merge,
39 revlog,
40 revlog,
40 util,
41 util,
41 )
42 )
42
43
43 # for "historical portability":
44 # for "historical portability":
44 # try to import modules separately (in dict order), and ignore
45 # try to import modules separately (in dict order), and ignore
45 # failure, because these aren't available with early Mercurial
46 # failure, because these aren't available with early Mercurial
46 try:
47 try:
47 from mercurial import branchmap # since 2.5 (or bcee63733aad)
48 from mercurial import branchmap # since 2.5 (or bcee63733aad)
48 except ImportError:
49 except ImportError:
49 pass
50 pass
50 try:
51 try:
51 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
52 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
52 except ImportError:
53 except ImportError:
53 pass
54 pass
54 try:
55 try:
55 from mercurial import registrar # since 3.7 (or 37d50250b696)
56 from mercurial import registrar # since 3.7 (or 37d50250b696)
56 dir(registrar) # forcibly load it
57 dir(registrar) # forcibly load it
57 except ImportError:
58 except ImportError:
58 registrar = None
59 registrar = None
59 try:
60 try:
60 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
61 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
61 except ImportError:
62 except ImportError:
62 pass
63 pass
63 try:
64 try:
64 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
65 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
65 except ImportError:
66 except ImportError:
66 pass
67 pass
67
68
68 def identity(a):
69 def identity(a):
69 return a
70 return a
70
71
71 try:
72 try:
72 from mercurial import pycompat
73 from mercurial import pycompat
73 getargspec = pycompat.getargspec # added to module after 4.5
74 getargspec = pycompat.getargspec # added to module after 4.5
74 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
75 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
75 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
76 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
76 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
77 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
77 if pycompat.ispy3:
78 if pycompat.ispy3:
78 _maxint = sys.maxsize # per py3 docs for replacing maxint
79 _maxint = sys.maxsize # per py3 docs for replacing maxint
79 else:
80 else:
80 _maxint = sys.maxint
81 _maxint = sys.maxint
81 except (ImportError, AttributeError):
82 except (ImportError, AttributeError):
82 import inspect
83 import inspect
83 getargspec = inspect.getargspec
84 getargspec = inspect.getargspec
84 _byteskwargs = identity
85 _byteskwargs = identity
85 _maxint = sys.maxint # no py3 support
86 _maxint = sys.maxint # no py3 support
86 _sysstr = lambda x: x # no py3 support
87 _sysstr = lambda x: x # no py3 support
87 _xrange = xrange
88 _xrange = xrange
88
89
89 try:
90 try:
90 # 4.7+
91 # 4.7+
91 queue = pycompat.queue.Queue
92 queue = pycompat.queue.Queue
92 except (AttributeError, ImportError):
93 except (AttributeError, ImportError):
93 # <4.7.
94 # <4.7.
94 try:
95 try:
95 queue = pycompat.queue
96 queue = pycompat.queue
96 except (AttributeError, ImportError):
97 except (AttributeError, ImportError):
97 queue = util.queue
98 queue = util.queue
98
99
99 try:
100 try:
100 from mercurial import logcmdutil
101 from mercurial import logcmdutil
101 makelogtemplater = logcmdutil.maketemplater
102 makelogtemplater = logcmdutil.maketemplater
102 except (AttributeError, ImportError):
103 except (AttributeError, ImportError):
103 try:
104 try:
104 makelogtemplater = cmdutil.makelogtemplater
105 makelogtemplater = cmdutil.makelogtemplater
105 except (AttributeError, ImportError):
106 except (AttributeError, ImportError):
106 makelogtemplater = None
107 makelogtemplater = None
107
108
108 # for "historical portability":
109 # for "historical portability":
109 # define util.safehasattr forcibly, because util.safehasattr has been
110 # define util.safehasattr forcibly, because util.safehasattr has been
110 # available since 1.9.3 (or 94b200a11cf7)
111 # available since 1.9.3 (or 94b200a11cf7)
111 _undefined = object()
112 _undefined = object()
112 def safehasattr(thing, attr):
113 def safehasattr(thing, attr):
113 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
114 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
114 setattr(util, 'safehasattr', safehasattr)
115 setattr(util, 'safehasattr', safehasattr)
115
116
116 # for "historical portability":
117 # for "historical portability":
117 # define util.timer forcibly, because util.timer has been available
118 # define util.timer forcibly, because util.timer has been available
118 # since ae5d60bb70c9
119 # since ae5d60bb70c9
119 if safehasattr(time, 'perf_counter'):
120 if safehasattr(time, 'perf_counter'):
120 util.timer = time.perf_counter
121 util.timer = time.perf_counter
121 elif os.name == b'nt':
122 elif os.name == b'nt':
122 util.timer = time.clock
123 util.timer = time.clock
123 else:
124 else:
124 util.timer = time.time
125 util.timer = time.time
125
126
126 # for "historical portability":
127 # for "historical portability":
127 # use locally defined empty option list, if formatteropts isn't
128 # use locally defined empty option list, if formatteropts isn't
128 # available, because commands.formatteropts has been available since
129 # available, because commands.formatteropts has been available since
129 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
130 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
130 # available since 2.2 (or ae5f92e154d3)
131 # available since 2.2 (or ae5f92e154d3)
131 formatteropts = getattr(cmdutil, "formatteropts",
132 formatteropts = getattr(cmdutil, "formatteropts",
132 getattr(commands, "formatteropts", []))
133 getattr(commands, "formatteropts", []))
133
134
134 # for "historical portability":
135 # for "historical portability":
135 # use locally defined option list, if debugrevlogopts isn't available,
136 # use locally defined option list, if debugrevlogopts isn't available,
136 # because commands.debugrevlogopts has been available since 3.7 (or
137 # because commands.debugrevlogopts has been available since 3.7 (or
137 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
138 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
138 # since 1.9 (or a79fea6b3e77).
139 # since 1.9 (or a79fea6b3e77).
139 revlogopts = getattr(cmdutil, "debugrevlogopts",
140 revlogopts = getattr(cmdutil, "debugrevlogopts",
140 getattr(commands, "debugrevlogopts", [
141 getattr(commands, "debugrevlogopts", [
141 (b'c', b'changelog', False, (b'open changelog')),
142 (b'c', b'changelog', False, (b'open changelog')),
142 (b'm', b'manifest', False, (b'open manifest')),
143 (b'm', b'manifest', False, (b'open manifest')),
143 (b'', b'dir', False, (b'open directory manifest')),
144 (b'', b'dir', False, (b'open directory manifest')),
144 ]))
145 ]))
145
146
146 cmdtable = {}
147 cmdtable = {}
147
148
148 # for "historical portability":
149 # for "historical portability":
149 # define parsealiases locally, because cmdutil.parsealiases has been
150 # define parsealiases locally, because cmdutil.parsealiases has been
150 # available since 1.5 (or 6252852b4332)
151 # available since 1.5 (or 6252852b4332)
151 def parsealiases(cmd):
152 def parsealiases(cmd):
152 return cmd.lstrip(b"^").split(b"|")
153 return cmd.lstrip(b"^").split(b"|")
153
154
154 if safehasattr(registrar, 'command'):
155 if safehasattr(registrar, 'command'):
155 command = registrar.command(cmdtable)
156 command = registrar.command(cmdtable)
156 elif safehasattr(cmdutil, 'command'):
157 elif safehasattr(cmdutil, 'command'):
157 command = cmdutil.command(cmdtable)
158 command = cmdutil.command(cmdtable)
158 if b'norepo' not in getargspec(command).args:
159 if b'norepo' not in getargspec(command).args:
159 # for "historical portability":
160 # for "historical portability":
160 # wrap original cmdutil.command, because "norepo" option has
161 # wrap original cmdutil.command, because "norepo" option has
161 # been available since 3.1 (or 75a96326cecb)
162 # been available since 3.1 (or 75a96326cecb)
162 _command = command
163 _command = command
163 def command(name, options=(), synopsis=None, norepo=False):
164 def command(name, options=(), synopsis=None, norepo=False):
164 if norepo:
165 if norepo:
165 commands.norepo += b' %s' % b' '.join(parsealiases(name))
166 commands.norepo += b' %s' % b' '.join(parsealiases(name))
166 return _command(name, list(options), synopsis)
167 return _command(name, list(options), synopsis)
167 else:
168 else:
168 # for "historical portability":
169 # for "historical portability":
169 # define "@command" annotation locally, because cmdutil.command
170 # define "@command" annotation locally, because cmdutil.command
170 # has been available since 1.9 (or 2daa5179e73f)
171 # has been available since 1.9 (or 2daa5179e73f)
171 def command(name, options=(), synopsis=None, norepo=False):
172 def command(name, options=(), synopsis=None, norepo=False):
172 def decorator(func):
173 def decorator(func):
173 if synopsis:
174 if synopsis:
174 cmdtable[name] = func, list(options), synopsis
175 cmdtable[name] = func, list(options), synopsis
175 else:
176 else:
176 cmdtable[name] = func, list(options)
177 cmdtable[name] = func, list(options)
177 if norepo:
178 if norepo:
178 commands.norepo += b' %s' % b' '.join(parsealiases(name))
179 commands.norepo += b' %s' % b' '.join(parsealiases(name))
179 return func
180 return func
180 return decorator
181 return decorator
181
182
182 try:
183 try:
183 import mercurial.registrar
184 import mercurial.registrar
184 import mercurial.configitems
185 import mercurial.configitems
185 configtable = {}
186 configtable = {}
186 configitem = mercurial.registrar.configitem(configtable)
187 configitem = mercurial.registrar.configitem(configtable)
187 configitem(b'perf', b'presleep',
188 configitem(b'perf', b'presleep',
188 default=mercurial.configitems.dynamicdefault,
189 default=mercurial.configitems.dynamicdefault,
189 )
190 )
190 configitem(b'perf', b'stub',
191 configitem(b'perf', b'stub',
191 default=mercurial.configitems.dynamicdefault,
192 default=mercurial.configitems.dynamicdefault,
192 )
193 )
193 configitem(b'perf', b'parentscount',
194 configitem(b'perf', b'parentscount',
194 default=mercurial.configitems.dynamicdefault,
195 default=mercurial.configitems.dynamicdefault,
195 )
196 )
196 configitem(b'perf', b'all-timing',
197 configitem(b'perf', b'all-timing',
197 default=mercurial.configitems.dynamicdefault,
198 default=mercurial.configitems.dynamicdefault,
198 )
199 )
199 except (ImportError, AttributeError):
200 except (ImportError, AttributeError):
200 pass
201 pass
201
202
202 def getlen(ui):
203 def getlen(ui):
203 if ui.configbool(b"perf", b"stub", False):
204 if ui.configbool(b"perf", b"stub", False):
204 return lambda x: 1
205 return lambda x: 1
205 return len
206 return len
206
207
207 def gettimer(ui, opts=None):
208 def gettimer(ui, opts=None):
208 """return a timer function and formatter: (timer, formatter)
209 """return a timer function and formatter: (timer, formatter)
209
210
210 This function exists to gather the creation of formatter in a single
211 This function exists to gather the creation of formatter in a single
211 place instead of duplicating it in all performance commands."""
212 place instead of duplicating it in all performance commands."""
212
213
213 # enforce an idle period before execution to counteract power management
214 # enforce an idle period before execution to counteract power management
214 # experimental config: perf.presleep
215 # experimental config: perf.presleep
215 time.sleep(getint(ui, b"perf", b"presleep", 1))
216 time.sleep(getint(ui, b"perf", b"presleep", 1))
216
217
217 if opts is None:
218 if opts is None:
218 opts = {}
219 opts = {}
219 # redirect all to stderr unless buffer api is in use
220 # redirect all to stderr unless buffer api is in use
220 if not ui._buffers:
221 if not ui._buffers:
221 ui = ui.copy()
222 ui = ui.copy()
222 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
223 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
223 if uifout:
224 if uifout:
224 # for "historical portability":
225 # for "historical portability":
225 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
226 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
226 uifout.set(ui.ferr)
227 uifout.set(ui.ferr)
227
228
228 # get a formatter
229 # get a formatter
229 uiformatter = getattr(ui, 'formatter', None)
230 uiformatter = getattr(ui, 'formatter', None)
230 if uiformatter:
231 if uiformatter:
231 fm = uiformatter(b'perf', opts)
232 fm = uiformatter(b'perf', opts)
232 else:
233 else:
233 # for "historical portability":
234 # for "historical portability":
234 # define formatter locally, because ui.formatter has been
235 # define formatter locally, because ui.formatter has been
235 # available since 2.2 (or ae5f92e154d3)
236 # available since 2.2 (or ae5f92e154d3)
236 from mercurial import node
237 from mercurial import node
237 class defaultformatter(object):
238 class defaultformatter(object):
238 """Minimized composition of baseformatter and plainformatter
239 """Minimized composition of baseformatter and plainformatter
239 """
240 """
240 def __init__(self, ui, topic, opts):
241 def __init__(self, ui, topic, opts):
241 self._ui = ui
242 self._ui = ui
242 if ui.debugflag:
243 if ui.debugflag:
243 self.hexfunc = node.hex
244 self.hexfunc = node.hex
244 else:
245 else:
245 self.hexfunc = node.short
246 self.hexfunc = node.short
246 def __nonzero__(self):
247 def __nonzero__(self):
247 return False
248 return False
248 __bool__ = __nonzero__
249 __bool__ = __nonzero__
249 def startitem(self):
250 def startitem(self):
250 pass
251 pass
251 def data(self, **data):
252 def data(self, **data):
252 pass
253 pass
253 def write(self, fields, deftext, *fielddata, **opts):
254 def write(self, fields, deftext, *fielddata, **opts):
254 self._ui.write(deftext % fielddata, **opts)
255 self._ui.write(deftext % fielddata, **opts)
255 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
256 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
256 if cond:
257 if cond:
257 self._ui.write(deftext % fielddata, **opts)
258 self._ui.write(deftext % fielddata, **opts)
258 def plain(self, text, **opts):
259 def plain(self, text, **opts):
259 self._ui.write(text, **opts)
260 self._ui.write(text, **opts)
260 def end(self):
261 def end(self):
261 pass
262 pass
262 fm = defaultformatter(ui, b'perf', opts)
263 fm = defaultformatter(ui, b'perf', opts)
263
264
264 # stub function, runs code only once instead of in a loop
265 # stub function, runs code only once instead of in a loop
265 # experimental config: perf.stub
266 # experimental config: perf.stub
266 if ui.configbool(b"perf", b"stub", False):
267 if ui.configbool(b"perf", b"stub", False):
267 return functools.partial(stub_timer, fm), fm
268 return functools.partial(stub_timer, fm), fm
268
269
269 # experimental config: perf.all-timing
270 # experimental config: perf.all-timing
270 displayall = ui.configbool(b"perf", b"all-timing", False)
271 displayall = ui.configbool(b"perf", b"all-timing", False)
271 return functools.partial(_timer, fm, displayall=displayall), fm
272 return functools.partial(_timer, fm, displayall=displayall), fm
272
273
273 def stub_timer(fm, func, title=None):
274 def stub_timer(fm, func, title=None):
274 func()
275 func()
275
276
277 @contextlib.contextmanager
278 def timeone():
279 r = []
280 ostart = os.times()
281 cstart = util.timer()
282 yield r
283 cstop = util.timer()
284 ostop = os.times()
285 a, b = ostart, ostop
286 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
287
276 def _timer(fm, func, title=None, displayall=False):
288 def _timer(fm, func, title=None, displayall=False):
277 gc.collect()
289 gc.collect()
278 results = []
290 results = []
279 begin = util.timer()
291 begin = util.timer()
280 count = 0
292 count = 0
281 while True:
293 while True:
282 ostart = os.times()
294 with timeone() as item:
283 cstart = util.timer()
295 r = func()
284 r = func()
296 count += 1
297 results.append(item[0])
285 cstop = util.timer()
298 cstop = util.timer()
286 ostop = os.times()
287 count += 1
288 a, b = ostart, ostop
289 results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
290 if cstop - begin > 3 and count >= 100:
299 if cstop - begin > 3 and count >= 100:
291 break
300 break
292 if cstop - begin > 10 and count >= 3:
301 if cstop - begin > 10 and count >= 3:
293 break
302 break
294
303
295 fm.startitem()
304 fm.startitem()
296
305
297 if title:
306 if title:
298 fm.write(b'title', b'! %s\n', title)
307 fm.write(b'title', b'! %s\n', title)
299 if r:
308 if r:
300 fm.write(b'result', b'! result: %s\n', r)
309 fm.write(b'result', b'! result: %s\n', r)
301 def display(role, entry):
310 def display(role, entry):
302 prefix = b''
311 prefix = b''
303 if role != b'best':
312 if role != b'best':
304 prefix = b'%s.' % role
313 prefix = b'%s.' % role
305 fm.plain(b'!')
314 fm.plain(b'!')
306 fm.write(prefix + b'wall', b' wall %f', entry[0])
315 fm.write(prefix + b'wall', b' wall %f', entry[0])
307 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
316 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
308 fm.write(prefix + b'user', b' user %f', entry[1])
317 fm.write(prefix + b'user', b' user %f', entry[1])
309 fm.write(prefix + b'sys', b' sys %f', entry[2])
318 fm.write(prefix + b'sys', b' sys %f', entry[2])
310 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
319 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
311 fm.plain(b'\n')
320 fm.plain(b'\n')
312 results.sort()
321 results.sort()
313 min_val = results[0]
322 min_val = results[0]
314 display(b'best', min_val)
323 display(b'best', min_val)
315 if displayall:
324 if displayall:
316 max_val = results[-1]
325 max_val = results[-1]
317 display(b'max', max_val)
326 display(b'max', max_val)
318 avg = tuple([sum(x) / count for x in zip(*results)])
327 avg = tuple([sum(x) / count for x in zip(*results)])
319 display(b'avg', avg)
328 display(b'avg', avg)
320 median = results[len(results) // 2]
329 median = results[len(results) // 2]
321 display(b'median', median)
330 display(b'median', median)
322
331
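After the loop change above, _timer() simply collects the tuples that timeone() appends and summarizes them with the sort/best/avg/median logic shown in this hunk. A self-contained illustration of that collect-and-summarize flow, reusing the timeone() and demo_workload() sketch given before the diff (the fixed repetition count here is arbitrary; perf.py instead loops until its time/count thresholds are hit):

    results = []
    for _ in range(5):
        with timeone() as item:
            demo_workload()
        results.append(item[0])
    results.sort()
    best = results[0]
    median = results[len(results) // 2]
    avg = tuple(sum(x) / len(results) for x in zip(*results))
    print('best wall %f, median wall %f, avg wall %f'
          % (best[0], median[0], avg[0]))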
323 # utilities for historical portability
332 # utilities for historical portability
324
333
325 def getint(ui, section, name, default):
334 def getint(ui, section, name, default):
326 # for "historical portability":
335 # for "historical portability":
327 # ui.configint has been available since 1.9 (or fa2b596db182)
336 # ui.configint has been available since 1.9 (or fa2b596db182)
328 v = ui.config(section, name, None)
337 v = ui.config(section, name, None)
329 if v is None:
338 if v is None:
330 return default
339 return default
331 try:
340 try:
332 return int(v)
341 return int(v)
333 except ValueError:
342 except ValueError:
334 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
343 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
335 % (section, name, v))
344 % (section, name, v))
336
345
337 def safeattrsetter(obj, name, ignoremissing=False):
346 def safeattrsetter(obj, name, ignoremissing=False):
338 """Ensure that 'obj' has 'name' attribute before subsequent setattr
347 """Ensure that 'obj' has 'name' attribute before subsequent setattr
339
348
340 This function is aborted, if 'obj' doesn't have 'name' attribute
349 This function is aborted, if 'obj' doesn't have 'name' attribute
341 at runtime. This avoids overlooking removal of an attribute, which
350 at runtime. This avoids overlooking removal of an attribute, which
342 breaks assumption of performance measurement, in the future.
351 breaks assumption of performance measurement, in the future.
343
352
344 This function returns the object to (1) assign a new value, and
353 This function returns the object to (1) assign a new value, and
345 (2) restore an original value to the attribute.
354 (2) restore an original value to the attribute.
346
355
347 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
356 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
348 abortion, and this function returns None. This is useful to
357 abortion, and this function returns None. This is useful to
349 examine an attribute, which isn't ensured in all Mercurial
358 examine an attribute, which isn't ensured in all Mercurial
350 versions.
359 versions.
351 """
360 """
352 if not util.safehasattr(obj, name):
361 if not util.safehasattr(obj, name):
353 if ignoremissing:
362 if ignoremissing:
354 return None
363 return None
355 raise error.Abort((b"missing attribute %s of %s might break assumption"
364 raise error.Abort((b"missing attribute %s of %s might break assumption"
356 b" of performance measurement") % (name, obj))
365 b" of performance measurement") % (name, obj))
357
366
358 origvalue = getattr(obj, _sysstr(name))
367 origvalue = getattr(obj, _sysstr(name))
359 class attrutil(object):
368 class attrutil(object):
360 def set(self, newvalue):
369 def set(self, newvalue):
361 setattr(obj, _sysstr(name), newvalue)
370 setattr(obj, _sysstr(name), newvalue)
362 def restore(self):
371 def restore(self):
363 setattr(obj, _sysstr(name), origvalue)
372 setattr(obj, _sysstr(name), origvalue)
364
373
365 return attrutil()
374 return attrutil()
366
375
367 # utilities to examine each internal API changes
376 # utilities to examine each internal API changes
368
377
369 def getbranchmapsubsettable():
378 def getbranchmapsubsettable():
370 # for "historical portability":
379 # for "historical portability":
371 # subsettable is defined in:
380 # subsettable is defined in:
372 # - branchmap since 2.9 (or 175c6fd8cacc)
381 # - branchmap since 2.9 (or 175c6fd8cacc)
373 # - repoview since 2.5 (or 59a9f18d4587)
382 # - repoview since 2.5 (or 59a9f18d4587)
374 for mod in (branchmap, repoview):
383 for mod in (branchmap, repoview):
375 subsettable = getattr(mod, 'subsettable', None)
384 subsettable = getattr(mod, 'subsettable', None)
376 if subsettable:
385 if subsettable:
377 return subsettable
386 return subsettable
378
387
379 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
388 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
380 # branchmap and repoview modules exist, but subsettable attribute
389 # branchmap and repoview modules exist, but subsettable attribute
381 # doesn't)
390 # doesn't)
382 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
391 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
383 hint=b"use 2.5 or later")
392 hint=b"use 2.5 or later")
384
393
385 def getsvfs(repo):
394 def getsvfs(repo):
386 """Return appropriate object to access files under .hg/store
395 """Return appropriate object to access files under .hg/store
387 """
396 """
388 # for "historical portability":
397 # for "historical portability":
389 # repo.svfs has been available since 2.3 (or 7034365089bf)
398 # repo.svfs has been available since 2.3 (or 7034365089bf)
390 svfs = getattr(repo, 'svfs', None)
399 svfs = getattr(repo, 'svfs', None)
391 if svfs:
400 if svfs:
392 return svfs
401 return svfs
393 else:
402 else:
394 return getattr(repo, 'sopener')
403 return getattr(repo, 'sopener')
395
404
396 def getvfs(repo):
405 def getvfs(repo):
397 """Return appropriate object to access files under .hg
406 """Return appropriate object to access files under .hg
398 """
407 """
399 # for "historical portability":
408 # for "historical portability":
400 # repo.vfs has been available since 2.3 (or 7034365089bf)
409 # repo.vfs has been available since 2.3 (or 7034365089bf)
401 vfs = getattr(repo, 'vfs', None)
410 vfs = getattr(repo, 'vfs', None)
402 if vfs:
411 if vfs:
403 return vfs
412 return vfs
404 else:
413 else:
405 return getattr(repo, 'opener')
414 return getattr(repo, 'opener')
406
415
407 def repocleartagscachefunc(repo):
416 def repocleartagscachefunc(repo):
408 """Return the function to clear tags cache according to repo internal API
417 """Return the function to clear tags cache according to repo internal API
409 """
418 """
410 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
419 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
411 # in this case, setattr(repo, '_tagscache', None) or so isn't
420 # in this case, setattr(repo, '_tagscache', None) or so isn't
412 # correct way to clear tags cache, because existing code paths
421 # correct way to clear tags cache, because existing code paths
413 # expect _tagscache to be a structured object.
422 # expect _tagscache to be a structured object.
414 def clearcache():
423 def clearcache():
415 # _tagscache has been filteredpropertycache since 2.5 (or
424 # _tagscache has been filteredpropertycache since 2.5 (or
416 # 98c867ac1330), and delattr() can't work in such case
425 # 98c867ac1330), and delattr() can't work in such case
417 if b'_tagscache' in vars(repo):
426 if b'_tagscache' in vars(repo):
418 del repo.__dict__[b'_tagscache']
427 del repo.__dict__[b'_tagscache']
419 return clearcache
428 return clearcache
420
429
421 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
430 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
422 if repotags: # since 1.4 (or 5614a628d173)
431 if repotags: # since 1.4 (or 5614a628d173)
423 return lambda : repotags.set(None)
432 return lambda : repotags.set(None)
424
433
425 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
434 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
426 if repotagscache: # since 0.6 (or d7df759d0e97)
435 if repotagscache: # since 0.6 (or d7df759d0e97)
427 return lambda : repotagscache.set(None)
436 return lambda : repotagscache.set(None)
428
437
429 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
438 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
430 # this point, but it isn't so problematic, because:
439 # this point, but it isn't so problematic, because:
431 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
440 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
432 # in perftags() causes failure soon
441 # in perftags() causes failure soon
433 # - perf.py itself has been available since 1.1 (or eb240755386d)
442 # - perf.py itself has been available since 1.1 (or eb240755386d)
434 raise error.Abort((b"tags API of this hg command is unknown"))
443 raise error.Abort((b"tags API of this hg command is unknown"))
435
444
436 # utilities to clear cache
445 # utilities to clear cache
437
446
438 def clearfilecache(repo, attrname):
447 def clearfilecache(repo, attrname):
439 unfi = repo.unfiltered()
448 unfi = repo.unfiltered()
440 if attrname in vars(unfi):
449 if attrname in vars(unfi):
441 delattr(unfi, attrname)
450 delattr(unfi, attrname)
442 unfi._filecache.pop(attrname, None)
451 unfi._filecache.pop(attrname, None)
443
452
444 # perf commands
453 # perf commands
445
454
446 @command(b'perfwalk', formatteropts)
455 @command(b'perfwalk', formatteropts)
447 def perfwalk(ui, repo, *pats, **opts):
456 def perfwalk(ui, repo, *pats, **opts):
448 opts = _byteskwargs(opts)
457 opts = _byteskwargs(opts)
449 timer, fm = gettimer(ui, opts)
458 timer, fm = gettimer(ui, opts)
450 m = scmutil.match(repo[None], pats, {})
459 m = scmutil.match(repo[None], pats, {})
451 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
460 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
452 ignored=False))))
461 ignored=False))))
453 fm.end()
462 fm.end()
454
463
455 @command(b'perfannotate', formatteropts)
464 @command(b'perfannotate', formatteropts)
456 def perfannotate(ui, repo, f, **opts):
465 def perfannotate(ui, repo, f, **opts):
457 opts = _byteskwargs(opts)
466 opts = _byteskwargs(opts)
458 timer, fm = gettimer(ui, opts)
467 timer, fm = gettimer(ui, opts)
459 fc = repo[b'.'][f]
468 fc = repo[b'.'][f]
460 timer(lambda: len(fc.annotate(True)))
469 timer(lambda: len(fc.annotate(True)))
461 fm.end()
470 fm.end()
462
471
463 @command(b'perfstatus',
472 @command(b'perfstatus',
464 [(b'u', b'unknown', False,
473 [(b'u', b'unknown', False,
465 b'ask status to look for unknown files')] + formatteropts)
474 b'ask status to look for unknown files')] + formatteropts)
466 def perfstatus(ui, repo, **opts):
475 def perfstatus(ui, repo, **opts):
467 opts = _byteskwargs(opts)
476 opts = _byteskwargs(opts)
468 #m = match.always(repo.root, repo.getcwd())
477 #m = match.always(repo.root, repo.getcwd())
469 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
478 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
470 # False))))
479 # False))))
471 timer, fm = gettimer(ui, opts)
480 timer, fm = gettimer(ui, opts)
472 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
481 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
473 fm.end()
482 fm.end()
474
483
475 @command(b'perfaddremove', formatteropts)
484 @command(b'perfaddremove', formatteropts)
476 def perfaddremove(ui, repo, **opts):
485 def perfaddremove(ui, repo, **opts):
477 opts = _byteskwargs(opts)
486 opts = _byteskwargs(opts)
478 timer, fm = gettimer(ui, opts)
487 timer, fm = gettimer(ui, opts)
479 try:
488 try:
480 oldquiet = repo.ui.quiet
489 oldquiet = repo.ui.quiet
481 repo.ui.quiet = True
490 repo.ui.quiet = True
482 matcher = scmutil.match(repo[None])
491 matcher = scmutil.match(repo[None])
483 opts[b'dry_run'] = True
492 opts[b'dry_run'] = True
484 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
493 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
485 finally:
494 finally:
486 repo.ui.quiet = oldquiet
495 repo.ui.quiet = oldquiet
487 fm.end()
496 fm.end()
488
497
489 def clearcaches(cl):
498 def clearcaches(cl):
490 # behave somewhat consistently across internal API changes
499 # behave somewhat consistently across internal API changes
491 if util.safehasattr(cl, b'clearcaches'):
500 if util.safehasattr(cl, b'clearcaches'):
492 cl.clearcaches()
501 cl.clearcaches()
493 elif util.safehasattr(cl, b'_nodecache'):
502 elif util.safehasattr(cl, b'_nodecache'):
494 from mercurial.node import nullid, nullrev
503 from mercurial.node import nullid, nullrev
495 cl._nodecache = {nullid: nullrev}
504 cl._nodecache = {nullid: nullrev}
496 cl._nodepos = None
505 cl._nodepos = None
497
506
498 @command(b'perfheads', formatteropts)
507 @command(b'perfheads', formatteropts)
499 def perfheads(ui, repo, **opts):
508 def perfheads(ui, repo, **opts):
500 opts = _byteskwargs(opts)
509 opts = _byteskwargs(opts)
501 timer, fm = gettimer(ui, opts)
510 timer, fm = gettimer(ui, opts)
502 cl = repo.changelog
511 cl = repo.changelog
503 def d():
512 def d():
504 len(cl.headrevs())
513 len(cl.headrevs())
505 clearcaches(cl)
514 clearcaches(cl)
506 timer(d)
515 timer(d)
507 fm.end()
516 fm.end()
508
517
509 @command(b'perftags', formatteropts)
518 @command(b'perftags', formatteropts)
510 def perftags(ui, repo, **opts):
519 def perftags(ui, repo, **opts):
511 import mercurial.changelog
520 import mercurial.changelog
512 import mercurial.manifest
521 import mercurial.manifest
513
522
514 opts = _byteskwargs(opts)
523 opts = _byteskwargs(opts)
515 timer, fm = gettimer(ui, opts)
524 timer, fm = gettimer(ui, opts)
516 svfs = getsvfs(repo)
525 svfs = getsvfs(repo)
517 repocleartagscache = repocleartagscachefunc(repo)
526 repocleartagscache = repocleartagscachefunc(repo)
518 def t():
527 def t():
519 repo.changelog = mercurial.changelog.changelog(svfs)
528 repo.changelog = mercurial.changelog.changelog(svfs)
520 rootmanifest = mercurial.manifest.manifestrevlog(svfs)
529 rootmanifest = mercurial.manifest.manifestrevlog(svfs)
521 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
530 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
522 rootmanifest)
531 rootmanifest)
523 repocleartagscache()
532 repocleartagscache()
524 return len(repo.tags())
533 return len(repo.tags())
525 timer(t)
534 timer(t)
526 fm.end()
535 fm.end()
527
536
528 @command(b'perfancestors', formatteropts)
537 @command(b'perfancestors', formatteropts)
529 def perfancestors(ui, repo, **opts):
538 def perfancestors(ui, repo, **opts):
530 opts = _byteskwargs(opts)
539 opts = _byteskwargs(opts)
531 timer, fm = gettimer(ui, opts)
540 timer, fm = gettimer(ui, opts)
532 heads = repo.changelog.headrevs()
541 heads = repo.changelog.headrevs()
533 def d():
542 def d():
534 for a in repo.changelog.ancestors(heads):
543 for a in repo.changelog.ancestors(heads):
535 pass
544 pass
536 timer(d)
545 timer(d)
537 fm.end()
546 fm.end()
538
547
539 @command(b'perfancestorset', formatteropts)
548 @command(b'perfancestorset', formatteropts)
540 def perfancestorset(ui, repo, revset, **opts):
549 def perfancestorset(ui, repo, revset, **opts):
541 opts = _byteskwargs(opts)
550 opts = _byteskwargs(opts)
542 timer, fm = gettimer(ui, opts)
551 timer, fm = gettimer(ui, opts)
543 revs = repo.revs(revset)
552 revs = repo.revs(revset)
544 heads = repo.changelog.headrevs()
553 heads = repo.changelog.headrevs()
545 def d():
554 def d():
546 s = repo.changelog.ancestors(heads)
555 s = repo.changelog.ancestors(heads)
547 for rev in revs:
556 for rev in revs:
548 rev in s
557 rev in s
549 timer(d)
558 timer(d)
550 fm.end()
559 fm.end()
551
560
552 @command(b'perfbookmarks', formatteropts)
561 @command(b'perfbookmarks', formatteropts)
553 def perfbookmarks(ui, repo, **opts):
562 def perfbookmarks(ui, repo, **opts):
554 """benchmark parsing bookmarks from disk to memory"""
563 """benchmark parsing bookmarks from disk to memory"""
555 opts = _byteskwargs(opts)
564 opts = _byteskwargs(opts)
556 timer, fm = gettimer(ui, opts)
565 timer, fm = gettimer(ui, opts)
557 def d():
566 def d():
558 clearfilecache(repo, b'_bookmarks')
567 clearfilecache(repo, b'_bookmarks')
559 repo._bookmarks
568 repo._bookmarks
560 timer(d)
569 timer(d)
561 fm.end()
570 fm.end()
562
571
563 @command(b'perfbundleread', formatteropts, b'BUNDLE')
572 @command(b'perfbundleread', formatteropts, b'BUNDLE')
564 def perfbundleread(ui, repo, bundlepath, **opts):
573 def perfbundleread(ui, repo, bundlepath, **opts):
565 """Benchmark reading of bundle files.
574 """Benchmark reading of bundle files.
566
575
567 This command is meant to isolate the I/O part of bundle reading as
576 This command is meant to isolate the I/O part of bundle reading as
568 much as possible.
577 much as possible.
569 """
578 """
570 from mercurial import (
579 from mercurial import (
571 bundle2,
580 bundle2,
572 exchange,
581 exchange,
573 streamclone,
582 streamclone,
574 )
583 )
575
584
576 opts = _byteskwargs(opts)
585 opts = _byteskwargs(opts)
577
586
578 def makebench(fn):
587 def makebench(fn):
579 def run():
588 def run():
580 with open(bundlepath, b'rb') as fh:
589 with open(bundlepath, b'rb') as fh:
581 bundle = exchange.readbundle(ui, fh, bundlepath)
590 bundle = exchange.readbundle(ui, fh, bundlepath)
582 fn(bundle)
591 fn(bundle)
583
592
584 return run
593 return run
585
594
586 def makereadnbytes(size):
595 def makereadnbytes(size):
587 def run():
596 def run():
588 with open(bundlepath, b'rb') as fh:
597 with open(bundlepath, b'rb') as fh:
589 bundle = exchange.readbundle(ui, fh, bundlepath)
598 bundle = exchange.readbundle(ui, fh, bundlepath)
590 while bundle.read(size):
599 while bundle.read(size):
591 pass
600 pass
592
601
593 return run
602 return run
594
603
595 def makestdioread(size):
604 def makestdioread(size):
596 def run():
605 def run():
597 with open(bundlepath, b'rb') as fh:
606 with open(bundlepath, b'rb') as fh:
598 while fh.read(size):
607 while fh.read(size):
599 pass
608 pass
600
609
601 return run
610 return run
602
611
603 # bundle1
612 # bundle1
604
613
605 def deltaiter(bundle):
614 def deltaiter(bundle):
606 for delta in bundle.deltaiter():
615 for delta in bundle.deltaiter():
607 pass
616 pass
608
617
609 def iterchunks(bundle):
618 def iterchunks(bundle):
610 for chunk in bundle.getchunks():
619 for chunk in bundle.getchunks():
611 pass
620 pass
612
621
613 # bundle2
622 # bundle2
614
623
615 def forwardchunks(bundle):
624 def forwardchunks(bundle):
616 for chunk in bundle._forwardchunks():
625 for chunk in bundle._forwardchunks():
617 pass
626 pass
618
627
619 def iterparts(bundle):
628 def iterparts(bundle):
620 for part in bundle.iterparts():
629 for part in bundle.iterparts():
621 pass
630 pass
622
631
623 def iterpartsseekable(bundle):
632 def iterpartsseekable(bundle):
624 for part in bundle.iterparts(seekable=True):
633 for part in bundle.iterparts(seekable=True):
625 pass
634 pass
626
635
627 def seek(bundle):
636 def seek(bundle):
628 for part in bundle.iterparts(seekable=True):
637 for part in bundle.iterparts(seekable=True):
629 part.seek(0, os.SEEK_END)
638 part.seek(0, os.SEEK_END)
630
639
631 def makepartreadnbytes(size):
640 def makepartreadnbytes(size):
632 def run():
641 def run():
633 with open(bundlepath, b'rb') as fh:
642 with open(bundlepath, b'rb') as fh:
634 bundle = exchange.readbundle(ui, fh, bundlepath)
643 bundle = exchange.readbundle(ui, fh, bundlepath)
635 for part in bundle.iterparts():
644 for part in bundle.iterparts():
636 while part.read(size):
645 while part.read(size):
637 pass
646 pass
638
647
639 return run
648 return run
640
649
641 benches = [
650 benches = [
642 (makestdioread(8192), b'read(8k)'),
651 (makestdioread(8192), b'read(8k)'),
643 (makestdioread(16384), b'read(16k)'),
652 (makestdioread(16384), b'read(16k)'),
644 (makestdioread(32768), b'read(32k)'),
653 (makestdioread(32768), b'read(32k)'),
645 (makestdioread(131072), b'read(128k)'),
654 (makestdioread(131072), b'read(128k)'),
646 ]
655 ]
647
656
648 with open(bundlepath, b'rb') as fh:
657 with open(bundlepath, b'rb') as fh:
649 bundle = exchange.readbundle(ui, fh, bundlepath)
658 bundle = exchange.readbundle(ui, fh, bundlepath)
650
659
651 if isinstance(bundle, changegroup.cg1unpacker):
660 if isinstance(bundle, changegroup.cg1unpacker):
652 benches.extend([
661 benches.extend([
653 (makebench(deltaiter), b'cg1 deltaiter()'),
662 (makebench(deltaiter), b'cg1 deltaiter()'),
654 (makebench(iterchunks), b'cg1 getchunks()'),
663 (makebench(iterchunks), b'cg1 getchunks()'),
655 (makereadnbytes(8192), b'cg1 read(8k)'),
664 (makereadnbytes(8192), b'cg1 read(8k)'),
656 (makereadnbytes(16384), b'cg1 read(16k)'),
665 (makereadnbytes(16384), b'cg1 read(16k)'),
657 (makereadnbytes(32768), b'cg1 read(32k)'),
666 (makereadnbytes(32768), b'cg1 read(32k)'),
658 (makereadnbytes(131072), b'cg1 read(128k)'),
667 (makereadnbytes(131072), b'cg1 read(128k)'),
659 ])
668 ])
660 elif isinstance(bundle, bundle2.unbundle20):
669 elif isinstance(bundle, bundle2.unbundle20):
661 benches.extend([
670 benches.extend([
662 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
671 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
663 (makebench(iterparts), b'bundle2 iterparts()'),
672 (makebench(iterparts), b'bundle2 iterparts()'),
664 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
673 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
665 (makebench(seek), b'bundle2 part seek()'),
674 (makebench(seek), b'bundle2 part seek()'),
666 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
675 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
667 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
676 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
668 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
677 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
669 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
678 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
670 ])
679 ])
671 elif isinstance(bundle, streamclone.streamcloneapplier):
680 elif isinstance(bundle, streamclone.streamcloneapplier):
672 raise error.Abort(b'stream clone bundles not supported')
681 raise error.Abort(b'stream clone bundles not supported')
673 else:
682 else:
674 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
683 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
675
684
676 for fn, title in benches:
685 for fn, title in benches:
677 timer, fm = gettimer(ui, opts)
686 timer, fm = gettimer(ui, opts)
678 timer(fn, title=title)
687 timer(fn, title=title)
679 fm.end()
688 fm.end()
680
689
681 @command(b'perfchangegroupchangelog', formatteropts +
690 @command(b'perfchangegroupchangelog', formatteropts +
682 [(b'', b'version', b'02', b'changegroup version'),
691 [(b'', b'version', b'02', b'changegroup version'),
683 (b'r', b'rev', b'', b'revisions to add to changegroup')])
692 (b'r', b'rev', b'', b'revisions to add to changegroup')])
684 def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
693 def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
685 """Benchmark producing a changelog group for a changegroup.
694 """Benchmark producing a changelog group for a changegroup.
686
695
687 This measures the time spent processing the changelog during a
696 This measures the time spent processing the changelog during a
688 bundle operation. This occurs during `hg bundle` and on a server
697 bundle operation. This occurs during `hg bundle` and on a server
689 processing a `getbundle` wire protocol request (handles clones
698 processing a `getbundle` wire protocol request (handles clones
690 and pull requests).
699 and pull requests).
691
700
692 By default, all revisions are added to the changegroup.
701 By default, all revisions are added to the changegroup.
693 """
702 """
694 opts = _byteskwargs(opts)
703 opts = _byteskwargs(opts)
695 cl = repo.changelog
704 cl = repo.changelog
696 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
705 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
697 bundler = changegroup.getbundler(version, repo)
706 bundler = changegroup.getbundler(version, repo)
698
707
699 def d():
708 def d():
700 state, chunks = bundler._generatechangelog(cl, nodes)
709 state, chunks = bundler._generatechangelog(cl, nodes)
701 for chunk in chunks:
710 for chunk in chunks:
702 pass
711 pass
703
712
704 timer, fm = gettimer(ui, opts)
713 timer, fm = gettimer(ui, opts)
705
714
706 # Terminal printing can interfere with timing. So disable it.
715 # Terminal printing can interfere with timing. So disable it.
707 with ui.configoverride({(b'progress', b'disable'): True}):
716 with ui.configoverride({(b'progress', b'disable'): True}):
708 timer(d)
717 timer(d)
709
718
710 fm.end()
719 fm.end()
711
720
712 @command(b'perfdirs', formatteropts)
721 @command(b'perfdirs', formatteropts)
713 def perfdirs(ui, repo, **opts):
722 def perfdirs(ui, repo, **opts):
714 opts = _byteskwargs(opts)
723 opts = _byteskwargs(opts)
715 timer, fm = gettimer(ui, opts)
724 timer, fm = gettimer(ui, opts)
716 dirstate = repo.dirstate
725 dirstate = repo.dirstate
717 b'a' in dirstate
726 b'a' in dirstate
718 def d():
727 def d():
719 dirstate.hasdir(b'a')
728 dirstate.hasdir(b'a')
720 del dirstate._map._dirs
729 del dirstate._map._dirs
721 timer(d)
730 timer(d)
722 fm.end()
731 fm.end()
723
732
724 @command(b'perfdirstate', formatteropts)
733 @command(b'perfdirstate', formatteropts)
725 def perfdirstate(ui, repo, **opts):
734 def perfdirstate(ui, repo, **opts):
726 opts = _byteskwargs(opts)
735 opts = _byteskwargs(opts)
727 timer, fm = gettimer(ui, opts)
736 timer, fm = gettimer(ui, opts)
728 b"a" in repo.dirstate
737 b"a" in repo.dirstate
729 def d():
738 def d():
730 repo.dirstate.invalidate()
739 repo.dirstate.invalidate()
731 b"a" in repo.dirstate
740 b"a" in repo.dirstate
732 timer(d)
741 timer(d)
733 fm.end()
742 fm.end()
734
743
735 @command(b'perfdirstatedirs', formatteropts)
744 @command(b'perfdirstatedirs', formatteropts)
736 def perfdirstatedirs(ui, repo, **opts):
745 def perfdirstatedirs(ui, repo, **opts):
737 opts = _byteskwargs(opts)
746 opts = _byteskwargs(opts)
738 timer, fm = gettimer(ui, opts)
747 timer, fm = gettimer(ui, opts)
739 b"a" in repo.dirstate
748 b"a" in repo.dirstate
740 def d():
749 def d():
741 repo.dirstate.hasdir(b"a")
750 repo.dirstate.hasdir(b"a")
742 del repo.dirstate._map._dirs
751 del repo.dirstate._map._dirs
743 timer(d)
752 timer(d)
744 fm.end()
753 fm.end()
745
754
746 @command(b'perfdirstatefoldmap', formatteropts)
755 @command(b'perfdirstatefoldmap', formatteropts)
747 def perfdirstatefoldmap(ui, repo, **opts):
756 def perfdirstatefoldmap(ui, repo, **opts):
748 opts = _byteskwargs(opts)
757 opts = _byteskwargs(opts)
749 timer, fm = gettimer(ui, opts)
758 timer, fm = gettimer(ui, opts)
750 dirstate = repo.dirstate
759 dirstate = repo.dirstate
751 b'a' in dirstate
760 b'a' in dirstate
752 def d():
761 def d():
753 dirstate._map.filefoldmap.get(b'a')
762 dirstate._map.filefoldmap.get(b'a')
754 del dirstate._map.filefoldmap
763 del dirstate._map.filefoldmap
755 timer(d)
764 timer(d)
756 fm.end()
765 fm.end()
757
766
758 @command(b'perfdirfoldmap', formatteropts)
767 @command(b'perfdirfoldmap', formatteropts)
759 def perfdirfoldmap(ui, repo, **opts):
768 def perfdirfoldmap(ui, repo, **opts):
760 opts = _byteskwargs(opts)
769 opts = _byteskwargs(opts)
761 timer, fm = gettimer(ui, opts)
770 timer, fm = gettimer(ui, opts)
762 dirstate = repo.dirstate
771 dirstate = repo.dirstate
763 b'a' in dirstate
772 b'a' in dirstate
764 def d():
773 def d():
765 dirstate._map.dirfoldmap.get(b'a')
774 dirstate._map.dirfoldmap.get(b'a')
766 del dirstate._map.dirfoldmap
775 del dirstate._map.dirfoldmap
767 del dirstate._map._dirs
776 del dirstate._map._dirs
768 timer(d)
777 timer(d)
769 fm.end()
778 fm.end()
770
779
771 @command(b'perfdirstatewrite', formatteropts)
780 @command(b'perfdirstatewrite', formatteropts)
772 def perfdirstatewrite(ui, repo, **opts):
781 def perfdirstatewrite(ui, repo, **opts):
773 opts = _byteskwargs(opts)
782 opts = _byteskwargs(opts)
774 timer, fm = gettimer(ui, opts)
783 timer, fm = gettimer(ui, opts)
775 ds = repo.dirstate
784 ds = repo.dirstate
776 b"a" in ds
785 b"a" in ds
777 def d():
786 def d():
778 ds._dirty = True
787 ds._dirty = True
779 ds.write(repo.currenttransaction())
788 ds.write(repo.currenttransaction())
780 timer(d)
789 timer(d)
781 fm.end()
790 fm.end()
782
791
783 @command(b'perfmergecalculate',
792 @command(b'perfmergecalculate',
784 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
793 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
785 def perfmergecalculate(ui, repo, rev, **opts):
794 def perfmergecalculate(ui, repo, rev, **opts):
786 opts = _byteskwargs(opts)
795 opts = _byteskwargs(opts)
787 timer, fm = gettimer(ui, opts)
796 timer, fm = gettimer(ui, opts)
788 wctx = repo[None]
797 wctx = repo[None]
789 rctx = scmutil.revsingle(repo, rev, rev)
798 rctx = scmutil.revsingle(repo, rev, rev)
790 ancestor = wctx.ancestor(rctx)
799 ancestor = wctx.ancestor(rctx)
791 # we don't want working dir files to be stat'd in the benchmark, so prime
800 # we don't want working dir files to be stat'd in the benchmark, so prime
792 # that cache
801 # that cache
793 wctx.dirty()
802 wctx.dirty()
794 def d():
803 def d():
795 # acceptremote is True because we don't want prompts in the middle of
804 # acceptremote is True because we don't want prompts in the middle of
796 # our benchmark
805 # our benchmark
797 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
806 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
798 acceptremote=True, followcopies=True)
807 acceptremote=True, followcopies=True)
799 timer(d)
808 timer(d)
800 fm.end()
809 fm.end()
801
810
802 @command(b'perfpathcopies', [], b"REV REV")
811 @command(b'perfpathcopies', [], b"REV REV")
803 def perfpathcopies(ui, repo, rev1, rev2, **opts):
812 def perfpathcopies(ui, repo, rev1, rev2, **opts):
804 opts = _byteskwargs(opts)
813 opts = _byteskwargs(opts)
805 timer, fm = gettimer(ui, opts)
814 timer, fm = gettimer(ui, opts)
806 ctx1 = scmutil.revsingle(repo, rev1, rev1)
815 ctx1 = scmutil.revsingle(repo, rev1, rev1)
807 ctx2 = scmutil.revsingle(repo, rev2, rev2)
816 ctx2 = scmutil.revsingle(repo, rev2, rev2)
808 def d():
817 def d():
809 copies.pathcopies(ctx1, ctx2)
818 copies.pathcopies(ctx1, ctx2)
810 timer(d)
819 timer(d)
811 fm.end()
820 fm.end()
812
821
813 @command(b'perfphases',
822 @command(b'perfphases',
814 [(b'', b'full', False, b'include file reading time too'),
823 [(b'', b'full', False, b'include file reading time too'),
815 ], b"")
824 ], b"")
816 def perfphases(ui, repo, **opts):
825 def perfphases(ui, repo, **opts):
817 """benchmark phasesets computation"""
826 """benchmark phasesets computation"""
818 opts = _byteskwargs(opts)
827 opts = _byteskwargs(opts)
819 timer, fm = gettimer(ui, opts)
828 timer, fm = gettimer(ui, opts)
820 _phases = repo._phasecache
829 _phases = repo._phasecache
821 full = opts.get(b'full')
830 full = opts.get(b'full')
822 def d():
831 def d():
823 phases = _phases
832 phases = _phases
824 if full:
833 if full:
825 clearfilecache(repo, b'_phasecache')
834 clearfilecache(repo, b'_phasecache')
826 phases = repo._phasecache
835 phases = repo._phasecache
827 phases.invalidate()
836 phases.invalidate()
828 phases.loadphaserevs(repo)
837 phases.loadphaserevs(repo)
829 timer(d)
838 timer(d)
830 fm.end()
839 fm.end()
831
840
832 @command(b'perfphasesremote',
841 @command(b'perfphasesremote',
833 [], b"[DEST]")
842 [], b"[DEST]")
834 def perfphasesremote(ui, repo, dest=None, **opts):
843 def perfphasesremote(ui, repo, dest=None, **opts):
835 """benchmark time needed to analyse phases of the remote server"""
844 """benchmark time needed to analyse phases of the remote server"""
836 from mercurial.node import (
845 from mercurial.node import (
837 bin,
846 bin,
838 )
847 )
839 from mercurial import (
848 from mercurial import (
840 exchange,
849 exchange,
841 hg,
850 hg,
842 phases,
851 phases,
843 )
852 )
844 opts = _byteskwargs(opts)
853 opts = _byteskwargs(opts)
845 timer, fm = gettimer(ui, opts)
854 timer, fm = gettimer(ui, opts)
846
855
847 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
856 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
848 if not path:
857 if not path:
849 raise error.Abort((b'default repository not configured!'),
858 raise error.Abort((b'default repository not configured!'),
850 hint=(b"see 'hg help config.paths'"))
859 hint=(b"see 'hg help config.paths'"))
851 dest = path.pushloc or path.loc
860 dest = path.pushloc or path.loc
852 branches = (path.branch, opts.get(b'branch') or [])
861 branches = (path.branch, opts.get(b'branch') or [])
853 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
862 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
854 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
863 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
855 other = hg.peer(repo, opts, dest)
864 other = hg.peer(repo, opts, dest)
856
865
857 # easier to perform discovery through the operation
866 # easier to perform discovery through the operation
858 op = exchange.pushoperation(repo, other)
867 op = exchange.pushoperation(repo, other)
859 exchange._pushdiscoverychangeset(op)
868 exchange._pushdiscoverychangeset(op)
860
869
861 remotesubset = op.fallbackheads
870 remotesubset = op.fallbackheads
862
871
863 with other.commandexecutor() as e:
872 with other.commandexecutor() as e:
864 remotephases = e.callcommand(b'listkeys',
873 remotephases = e.callcommand(b'listkeys',
865 {b'namespace': b'phases'}).result()
874 {b'namespace': b'phases'}).result()
866 del other
875 del other
867 publishing = remotephases.get(b'publishing', False)
876 publishing = remotephases.get(b'publishing', False)
868 if publishing:
877 if publishing:
869 ui.status((b'publishing: yes\n'))
878 ui.status((b'publishing: yes\n'))
870 else:
879 else:
871 ui.status((b'publishing: no\n'))
880 ui.status((b'publishing: no\n'))
872
881
873 nodemap = repo.changelog.nodemap
882 nodemap = repo.changelog.nodemap
874 nonpublishroots = 0
883 nonpublishroots = 0
875 for nhex, phase in remotephases.iteritems():
884 for nhex, phase in remotephases.iteritems():
876 if nhex == b'publishing': # ignore data related to publish option
885 if nhex == b'publishing': # ignore data related to publish option
877 continue
886 continue
878 node = bin(nhex)
887 node = bin(nhex)
879 if node in nodemap and int(phase):
888 if node in nodemap and int(phase):
880 nonpublishroots += 1
889 nonpublishroots += 1
881 ui.status((b'number of roots: %d\n') % len(remotephases))
890 ui.status((b'number of roots: %d\n') % len(remotephases))
882 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
891 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
883 def d():
892 def d():
884 phases.remotephasessummary(repo,
893 phases.remotephasessummary(repo,
885 remotesubset,
894 remotesubset,
886 remotephases)
895 remotephases)
887 timer(d)
896 timer(d)
888 fm.end()
897 fm.end()
889
898
890 @command(b'perfmanifest',[
899 @command(b'perfmanifest',[
891 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
900 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
892 (b'', b'clear-disk', False, b'clear on-disk caches too'),
901 (b'', b'clear-disk', False, b'clear on-disk caches too'),
893 ] + formatteropts, b'REV|NODE')
902 ] + formatteropts, b'REV|NODE')
894 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
903 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
895 """benchmark the time to read a manifest from disk and return a usable
904 """benchmark the time to read a manifest from disk and return a usable
896 dict-like object
905 dict-like object
897
906
898 Manifest caches are cleared before retrieval."""
907 Manifest caches are cleared before retrieval."""
899 opts = _byteskwargs(opts)
908 opts = _byteskwargs(opts)
900 timer, fm = gettimer(ui, opts)
909 timer, fm = gettimer(ui, opts)
901 if not manifest_rev:
910 if not manifest_rev:
902 ctx = scmutil.revsingle(repo, rev, rev)
911 ctx = scmutil.revsingle(repo, rev, rev)
903 t = ctx.manifestnode()
912 t = ctx.manifestnode()
904 else:
913 else:
905 from mercurial.node import bin
914 from mercurial.node import bin
906
915
907 if len(rev) == 40:
916 if len(rev) == 40:
908 t = bin(rev)
917 t = bin(rev)
909 else:
918 else:
910 try:
919 try:
911 rev = int(rev)
920 rev = int(rev)
912
921
913 if util.safehasattr(repo.manifestlog, b'getstorage'):
922 if util.safehasattr(repo.manifestlog, b'getstorage'):
914 t = repo.manifestlog.getstorage(b'').node(rev)
923 t = repo.manifestlog.getstorage(b'').node(rev)
915 else:
924 else:
916 t = repo.manifestlog._revlog.lookup(rev)
925 t = repo.manifestlog._revlog.lookup(rev)
917 except ValueError:
926 except ValueError:
918 raise error.Abort(b'manifest revision must be integer or full '
927 raise error.Abort(b'manifest revision must be integer or full '
919 b'node')
928 b'node')
920 def d():
929 def d():
921 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
930 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
922 repo.manifestlog[t].read()
931 repo.manifestlog[t].read()
923 timer(d)
932 timer(d)
924 fm.end()
933 fm.end()
925
934
926 @command(b'perfchangeset', formatteropts)
935 @command(b'perfchangeset', formatteropts)
927 def perfchangeset(ui, repo, rev, **opts):
936 def perfchangeset(ui, repo, rev, **opts):
928 opts = _byteskwargs(opts)
937 opts = _byteskwargs(opts)
929 timer, fm = gettimer(ui, opts)
938 timer, fm = gettimer(ui, opts)
930 n = scmutil.revsingle(repo, rev).node()
939 n = scmutil.revsingle(repo, rev).node()
931 def d():
940 def d():
932 repo.changelog.read(n)
941 repo.changelog.read(n)
933 #repo.changelog._cache = None
942 #repo.changelog._cache = None
934 timer(d)
943 timer(d)
935 fm.end()
944 fm.end()
936
945
937 @command(b'perfindex', formatteropts)
946 @command(b'perfindex', formatteropts)
938 def perfindex(ui, repo, **opts):
947 def perfindex(ui, repo, **opts):
939 import mercurial.revlog
948 import mercurial.revlog
940 opts = _byteskwargs(opts)
949 opts = _byteskwargs(opts)
941 timer, fm = gettimer(ui, opts)
950 timer, fm = gettimer(ui, opts)
942 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
951 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
943 n = repo[b"tip"].node()
952 n = repo[b"tip"].node()
944 svfs = getsvfs(repo)
953 svfs = getsvfs(repo)
945 def d():
954 def d():
946 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
955 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
947 cl.rev(n)
956 cl.rev(n)
948 timer(d)
957 timer(d)
949 fm.end()
958 fm.end()
950
959
951 @command(b'perfstartup', formatteropts)
960 @command(b'perfstartup', formatteropts)
952 def perfstartup(ui, repo, **opts):
961 def perfstartup(ui, repo, **opts):
953 opts = _byteskwargs(opts)
962 opts = _byteskwargs(opts)
954 timer, fm = gettimer(ui, opts)
963 timer, fm = gettimer(ui, opts)
955 cmd = sys.argv[0]
964 cmd = sys.argv[0]
956 def d():
965 def d():
957 if os.name != r'nt':
966 if os.name != r'nt':
958 os.system(b"HGRCPATH= %s version -q > /dev/null" % cmd)
967 os.system(b"HGRCPATH= %s version -q > /dev/null" % cmd)
959 else:
968 else:
960 os.environ[r'HGRCPATH'] = r' '
969 os.environ[r'HGRCPATH'] = r' '
961 os.system(r"%s version -q > NUL" % cmd)
970 os.system(r"%s version -q > NUL" % cmd)
962 timer(d)
971 timer(d)
963 fm.end()
972 fm.end()
964
973
965 @command(b'perfparents', formatteropts)
974 @command(b'perfparents', formatteropts)
966 def perfparents(ui, repo, **opts):
975 def perfparents(ui, repo, **opts):
967 opts = _byteskwargs(opts)
976 opts = _byteskwargs(opts)
968 timer, fm = gettimer(ui, opts)
977 timer, fm = gettimer(ui, opts)
969 # control the number of commits perfparents iterates over
978 # control the number of commits perfparents iterates over
970 # experimental config: perf.parentscount
979 # experimental config: perf.parentscount
971 count = getint(ui, b"perf", b"parentscount", 1000)
980 count = getint(ui, b"perf", b"parentscount", 1000)
972 if len(repo.changelog) < count:
981 if len(repo.changelog) < count:
973 raise error.Abort(b"repo needs %d commits for this test" % count)
982 raise error.Abort(b"repo needs %d commits for this test" % count)
974 repo = repo.unfiltered()
983 repo = repo.unfiltered()
975 nl = [repo.changelog.node(i) for i in _xrange(count)]
984 nl = [repo.changelog.node(i) for i in _xrange(count)]
976 def d():
985 def d():
977 for n in nl:
986 for n in nl:
978 repo.changelog.parents(n)
987 repo.changelog.parents(n)
979 timer(d)
988 timer(d)
980 fm.end()
989 fm.end()
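# NOTE (editorial, not part of this changeset): perfparents reads the parents of
# the first perf.parentscount revisions (1000 by default) and aborts on smaller
# repositories; a hypothetical invocation lowering that threshold:
#
#   $ hg perfparents --config perf.parentscount=100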
981
990
982 @command(b'perfctxfiles', formatteropts)
991 @command(b'perfctxfiles', formatteropts)
983 def perfctxfiles(ui, repo, x, **opts):
992 def perfctxfiles(ui, repo, x, **opts):
984 opts = _byteskwargs(opts)
993 opts = _byteskwargs(opts)
985 x = int(x)
994 x = int(x)
986 timer, fm = gettimer(ui, opts)
995 timer, fm = gettimer(ui, opts)
987 def d():
996 def d():
988 len(repo[x].files())
997 len(repo[x].files())
989 timer(d)
998 timer(d)
990 fm.end()
999 fm.end()
991
1000
992 @command(b'perfrawfiles', formatteropts)
1001 @command(b'perfrawfiles', formatteropts)
993 def perfrawfiles(ui, repo, x, **opts):
1002 def perfrawfiles(ui, repo, x, **opts):
994 opts = _byteskwargs(opts)
1003 opts = _byteskwargs(opts)
995 x = int(x)
1004 x = int(x)
996 timer, fm = gettimer(ui, opts)
1005 timer, fm = gettimer(ui, opts)
997 cl = repo.changelog
1006 cl = repo.changelog
998 def d():
1007 def d():
999 len(cl.read(x)[3])
1008 len(cl.read(x)[3])
1000 timer(d)
1009 timer(d)
1001 fm.end()
1010 fm.end()
1002
1011
1003 @command(b'perflookup', formatteropts)
1012 @command(b'perflookup', formatteropts)
1004 def perflookup(ui, repo, rev, **opts):
1013 def perflookup(ui, repo, rev, **opts):
1005 opts = _byteskwargs(opts)
1014 opts = _byteskwargs(opts)
1006 timer, fm = gettimer(ui, opts)
1015 timer, fm = gettimer(ui, opts)
1007 timer(lambda: len(repo.lookup(rev)))
1016 timer(lambda: len(repo.lookup(rev)))
1008 fm.end()
1017 fm.end()
1009
1018
1010 @command(b'perflinelogedits',
1019 @command(b'perflinelogedits',
1011 [(b'n', b'edits', 10000, b'number of edits'),
1020 [(b'n', b'edits', 10000, b'number of edits'),
1012 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1021 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1013 ], norepo=True)
1022 ], norepo=True)
1014 def perflinelogedits(ui, **opts):
1023 def perflinelogedits(ui, **opts):
1015 from mercurial import linelog
1024 from mercurial import linelog
1016
1025
1017 opts = _byteskwargs(opts)
1026 opts = _byteskwargs(opts)
1018
1027
1019 edits = opts[b'edits']
1028 edits = opts[b'edits']
1020 maxhunklines = opts[b'max_hunk_lines']
1029 maxhunklines = opts[b'max_hunk_lines']
1021
1030
1022 maxb1 = 100000
1031 maxb1 = 100000
1023 random.seed(0)
1032 random.seed(0)
1024 randint = random.randint
1033 randint = random.randint
1025 currentlines = 0
1034 currentlines = 0
1026 arglist = []
1035 arglist = []
1027 for rev in _xrange(edits):
1036 for rev in _xrange(edits):
1028 a1 = randint(0, currentlines)
1037 a1 = randint(0, currentlines)
1029 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1038 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1030 b1 = randint(0, maxb1)
1039 b1 = randint(0, maxb1)
1031 b2 = randint(b1, b1 + maxhunklines)
1040 b2 = randint(b1, b1 + maxhunklines)
1032 currentlines += (b2 - b1) - (a2 - a1)
1041 currentlines += (b2 - b1) - (a2 - a1)
1033 arglist.append((rev, a1, a2, b1, b2))
1042 arglist.append((rev, a1, a2, b1, b2))
1034
1043
1035 def d():
1044 def d():
1036 ll = linelog.linelog()
1045 ll = linelog.linelog()
1037 for args in arglist:
1046 for args in arglist:
1038 ll.replacelines(*args)
1047 ll.replacelines(*args)
1039
1048
1040 timer, fm = gettimer(ui, opts)
1049 timer, fm = gettimer(ui, opts)
1041 timer(d)
1050 timer(d)
1042 fm.end()
1051 fm.end()
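# NOTE (editorial, not part of this changeset): perflinelogedits is declared with
# norepo=True, so it can also run outside a repository; a hypothetical invocation:
#
#   $ hg perflinelogedits -n 20000 --max-hunk-lines 5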
1043
1052
1044 @command(b'perfrevrange', formatteropts)
1053 @command(b'perfrevrange', formatteropts)
1045 def perfrevrange(ui, repo, *specs, **opts):
1054 def perfrevrange(ui, repo, *specs, **opts):
1046 opts = _byteskwargs(opts)
1055 opts = _byteskwargs(opts)
1047 timer, fm = gettimer(ui, opts)
1056 timer, fm = gettimer(ui, opts)
1048 revrange = scmutil.revrange
1057 revrange = scmutil.revrange
1049 timer(lambda: len(revrange(repo, specs)))
1058 timer(lambda: len(revrange(repo, specs)))
1050 fm.end()
1059 fm.end()
1051
1060
1052 @command(b'perfnodelookup', formatteropts)
1061 @command(b'perfnodelookup', formatteropts)
1053 def perfnodelookup(ui, repo, rev, **opts):
1062 def perfnodelookup(ui, repo, rev, **opts):
1054 opts = _byteskwargs(opts)
1063 opts = _byteskwargs(opts)
1055 timer, fm = gettimer(ui, opts)
1064 timer, fm = gettimer(ui, opts)
1056 import mercurial.revlog
1065 import mercurial.revlog
1057 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1066 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1058 n = scmutil.revsingle(repo, rev).node()
1067 n = scmutil.revsingle(repo, rev).node()
1059 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1068 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1060 def d():
1069 def d():
1061 cl.rev(n)
1070 cl.rev(n)
1062 clearcaches(cl)
1071 clearcaches(cl)
1063 timer(d)
1072 timer(d)
1064 fm.end()
1073 fm.end()
1065
1074
1066 @command(b'perflog',
1075 @command(b'perflog',
1067 [(b'', b'rename', False, b'ask log to follow renames')
1076 [(b'', b'rename', False, b'ask log to follow renames')
1068 ] + formatteropts)
1077 ] + formatteropts)
1069 def perflog(ui, repo, rev=None, **opts):
1078 def perflog(ui, repo, rev=None, **opts):
1070 opts = _byteskwargs(opts)
1079 opts = _byteskwargs(opts)
1071 if rev is None:
1080 if rev is None:
1072 rev=[]
1081 rev=[]
1073 timer, fm = gettimer(ui, opts)
1082 timer, fm = gettimer(ui, opts)
1074 ui.pushbuffer()
1083 ui.pushbuffer()
1075 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1084 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1076 copies=opts.get(b'rename')))
1085 copies=opts.get(b'rename')))
1077 ui.popbuffer()
1086 ui.popbuffer()
1078 fm.end()
1087 fm.end()
1079
1088
1080 @command(b'perfmoonwalk', formatteropts)
1089 @command(b'perfmoonwalk', formatteropts)
1081 def perfmoonwalk(ui, repo, **opts):
1090 def perfmoonwalk(ui, repo, **opts):
1082 """benchmark walking the changelog backwards
1091 """benchmark walking the changelog backwards
1083
1092
1084 This also loads the changelog data for each revision in the changelog.
1093 This also loads the changelog data for each revision in the changelog.
1085 """
1094 """
1086 opts = _byteskwargs(opts)
1095 opts = _byteskwargs(opts)
1087 timer, fm = gettimer(ui, opts)
1096 timer, fm = gettimer(ui, opts)
1088 def moonwalk():
1097 def moonwalk():
1089 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1098 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1090 ctx = repo[i]
1099 ctx = repo[i]
1091 ctx.branch() # read changelog data (in addition to the index)
1100 ctx.branch() # read changelog data (in addition to the index)
1092 timer(moonwalk)
1101 timer(moonwalk)
1093 fm.end()
1102 fm.end()
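# NOTE (editorial, not part of this changeset): perfmoonwalk walks from tip down
# to revision 0 and forces the changelog data of each revision to load via
# ctx.branch(); a hypothetical invocation:
#
#   $ hg perfmoonwalk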
1094
1103
1095 @command(b'perftemplating',
1104 @command(b'perftemplating',
1096 [(b'r', b'rev', [], b'revisions to run the template on'),
1105 [(b'r', b'rev', [], b'revisions to run the template on'),
1097 ] + formatteropts)
1106 ] + formatteropts)
1098 def perftemplating(ui, repo, testedtemplate=None, **opts):
1107 def perftemplating(ui, repo, testedtemplate=None, **opts):
1099 """test the rendering time of a given template"""
1108 """test the rendering time of a given template"""
1100 if makelogtemplater is None:
1109 if makelogtemplater is None:
1101 raise error.Abort((b"perftemplating not available with this Mercurial"),
1110 raise error.Abort((b"perftemplating not available with this Mercurial"),
1102 hint=b"use 4.3 or later")
1111 hint=b"use 4.3 or later")
1103
1112
1104 opts = _byteskwargs(opts)
1113 opts = _byteskwargs(opts)
1105
1114
1106 nullui = ui.copy()
1115 nullui = ui.copy()
1107 nullui.fout = open(os.devnull, r'wb')
1116 nullui.fout = open(os.devnull, r'wb')
1108 nullui.disablepager()
1117 nullui.disablepager()
1109 revs = opts.get(b'rev')
1118 revs = opts.get(b'rev')
1110 if not revs:
1119 if not revs:
1111 revs = [b'all()']
1120 revs = [b'all()']
1112 revs = list(scmutil.revrange(repo, revs))
1121 revs = list(scmutil.revrange(repo, revs))
1113
1122
1114 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1123 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1115 b' {author|person}: {desc|firstline}\n')
1124 b' {author|person}: {desc|firstline}\n')
1116 if testedtemplate is None:
1125 if testedtemplate is None:
1117 testedtemplate = defaulttemplate
1126 testedtemplate = defaulttemplate
1118 displayer = makelogtemplater(nullui, repo, testedtemplate)
1127 displayer = makelogtemplater(nullui, repo, testedtemplate)
1119 def format():
1128 def format():
1120 for r in revs:
1129 for r in revs:
1121 ctx = repo[r]
1130 ctx = repo[r]
1122 displayer.show(ctx)
1131 displayer.show(ctx)
1123 displayer.flush(ctx)
1132 displayer.flush(ctx)
1124
1133
1125 timer, fm = gettimer(ui, opts)
1134 timer, fm = gettimer(ui, opts)
1126 timer(format)
1135 timer(format)
1127 fm.end()
1136 fm.end()
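# NOTE (editorial, not part of this changeset): hypothetical invocations of
# perftemplating; the positional argument replaces the built-in default template
# and -r/--rev limits the rendered revisions (revset syntax):
#
#   $ hg perftemplating -r 'last(all(), 1000)'
#   $ hg perftemplating -r 'all()' '{rev}:{node|short} {desc|firstline}\n'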
1128
1137
1129 @command(b'perfcca', formatteropts)
1138 @command(b'perfcca', formatteropts)
1130 def perfcca(ui, repo, **opts):
1139 def perfcca(ui, repo, **opts):
1131 opts = _byteskwargs(opts)
1140 opts = _byteskwargs(opts)
1132 timer, fm = gettimer(ui, opts)
1141 timer, fm = gettimer(ui, opts)
1133 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1142 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1134 fm.end()
1143 fm.end()
1135
1144
1136 @command(b'perffncacheload', formatteropts)
1145 @command(b'perffncacheload', formatteropts)
1137 def perffncacheload(ui, repo, **opts):
1146 def perffncacheload(ui, repo, **opts):
1138 opts = _byteskwargs(opts)
1147 opts = _byteskwargs(opts)
1139 timer, fm = gettimer(ui, opts)
1148 timer, fm = gettimer(ui, opts)
1140 s = repo.store
1149 s = repo.store
1141 def d():
1150 def d():
1142 s.fncache._load()
1151 s.fncache._load()
1143 timer(d)
1152 timer(d)
1144 fm.end()
1153 fm.end()
1145
1154
1146 @command(b'perffncachewrite', formatteropts)
1155 @command(b'perffncachewrite', formatteropts)
1147 def perffncachewrite(ui, repo, **opts):
1156 def perffncachewrite(ui, repo, **opts):
1148 opts = _byteskwargs(opts)
1157 opts = _byteskwargs(opts)
1149 timer, fm = gettimer(ui, opts)
1158 timer, fm = gettimer(ui, opts)
1150 s = repo.store
1159 s = repo.store
1151 lock = repo.lock()
1160 lock = repo.lock()
1152 s.fncache._load()
1161 s.fncache._load()
1153 tr = repo.transaction(b'perffncachewrite')
1162 tr = repo.transaction(b'perffncachewrite')
1154 tr.addbackup(b'fncache')
1163 tr.addbackup(b'fncache')
1155 def d():
1164 def d():
1156 s.fncache._dirty = True
1165 s.fncache._dirty = True
1157 s.fncache.write(tr)
1166 s.fncache.write(tr)
1158 timer(d)
1167 timer(d)
1159 tr.close()
1168 tr.close()
1160 lock.release()
1169 lock.release()
1161 fm.end()
1170 fm.end()
1162
1171
1163 @command(b'perffncacheencode', formatteropts)
1172 @command(b'perffncacheencode', formatteropts)
1164 def perffncacheencode(ui, repo, **opts):
1173 def perffncacheencode(ui, repo, **opts):
1165 opts = _byteskwargs(opts)
1174 opts = _byteskwargs(opts)
1166 timer, fm = gettimer(ui, opts)
1175 timer, fm = gettimer(ui, opts)
1167 s = repo.store
1176 s = repo.store
1168 s.fncache._load()
1177 s.fncache._load()
1169 def d():
1178 def d():
1170 for p in s.fncache.entries:
1179 for p in s.fncache.entries:
1171 s.encode(p)
1180 s.encode(p)
1172 timer(d)
1181 timer(d)
1173 fm.end()
1182 fm.end()
1174
1183
1175 def _bdiffworker(q, blocks, xdiff, ready, done):
1184 def _bdiffworker(q, blocks, xdiff, ready, done):
1176 while not done.is_set():
1185 while not done.is_set():
1177 pair = q.get()
1186 pair = q.get()
1178 while pair is not None:
1187 while pair is not None:
1179 if xdiff:
1188 if xdiff:
1180 mdiff.bdiff.xdiffblocks(*pair)
1189 mdiff.bdiff.xdiffblocks(*pair)
1181 elif blocks:
1190 elif blocks:
1182 mdiff.bdiff.blocks(*pair)
1191 mdiff.bdiff.blocks(*pair)
1183 else:
1192 else:
1184 mdiff.textdiff(*pair)
1193 mdiff.textdiff(*pair)
1185 q.task_done()
1194 q.task_done()
1186 pair = q.get()
1195 pair = q.get()
1187 q.task_done() # for the None one
1196 q.task_done() # for the None one
1188 with ready:
1197 with ready:
1189 ready.wait()
1198 ready.wait()
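# NOTE (editorial, not part of this changeset): in the worker loop above, each
# thread drains (text1, text2) pairs from the queue until it reads a None
# sentinel, then blocks on the `ready` condition. perfbdiff pre-starts and parks
# the workers this way so the timed closure measures only the diffing, not thread
# startup; the `done` event finally lets the workers exit after the benchmark.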
1190
1199
1191 def _manifestrevision(repo, mnode):
1200 def _manifestrevision(repo, mnode):
1192 ml = repo.manifestlog
1201 ml = repo.manifestlog
1193
1202
1194 if util.safehasattr(ml, b'getstorage'):
1203 if util.safehasattr(ml, b'getstorage'):
1195 store = ml.getstorage(b'')
1204 store = ml.getstorage(b'')
1196 else:
1205 else:
1197 store = ml._revlog
1206 store = ml._revlog
1198
1207
1199 return store.revision(mnode)
1208 return store.revision(mnode)
1200
1209
1201 @command(b'perfbdiff', revlogopts + formatteropts + [
1210 @command(b'perfbdiff', revlogopts + formatteropts + [
1202 (b'', b'count', 1, b'number of revisions to test, starting at the specified revision'),
1211 (b'', b'count', 1, b'number of revisions to test, starting at the specified revision'),
1203 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1212 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1204 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1213 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1205 (b'', b'blocks', False, b'test computing diffs into blocks'),
1214 (b'', b'blocks', False, b'test computing diffs into blocks'),
1206 (b'', b'xdiff', False, b'use xdiff algorithm'),
1215 (b'', b'xdiff', False, b'use xdiff algorithm'),
1207 ],
1216 ],
1208
1217
1209 b'-c|-m|FILE REV')
1218 b'-c|-m|FILE REV')
1210 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1219 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1211 """benchmark a bdiff between revisions
1220 """benchmark a bdiff between revisions
1212
1221
1213 By default, benchmark a bdiff between the requested revision and its delta parent.
1222 By default, benchmark a bdiff between the requested revision and its delta parent.
1214
1223
1215 With ``--count``, benchmark bdiffs between delta parents and self for N
1224 With ``--count``, benchmark bdiffs between delta parents and self for N
1216 revisions starting at the specified revision.
1225 revisions starting at the specified revision.
1217
1226
1218 With ``--alldata``, assume the requested revision is a changeset and
1227 With ``--alldata``, assume the requested revision is a changeset and
1219 measure bdiffs for all changes related to that changeset (manifest
1228 measure bdiffs for all changes related to that changeset (manifest
1220 and filelogs).
1229 and filelogs).
1221 """
1230 """
1222 opts = _byteskwargs(opts)
1231 opts = _byteskwargs(opts)
1223
1232
1224 if opts[b'xdiff'] and not opts[b'blocks']:
1233 if opts[b'xdiff'] and not opts[b'blocks']:
1225 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1234 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1226
1235
1227 if opts[b'alldata']:
1236 if opts[b'alldata']:
1228 opts[b'changelog'] = True
1237 opts[b'changelog'] = True
1229
1238
1230 if opts.get(b'changelog') or opts.get(b'manifest'):
1239 if opts.get(b'changelog') or opts.get(b'manifest'):
1231 file_, rev = None, file_
1240 file_, rev = None, file_
1232 elif rev is None:
1241 elif rev is None:
1233 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1242 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1234
1243
1235 blocks = opts[b'blocks']
1244 blocks = opts[b'blocks']
1236 xdiff = opts[b'xdiff']
1245 xdiff = opts[b'xdiff']
1237 textpairs = []
1246 textpairs = []
1238
1247
1239 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1248 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1240
1249
1241 startrev = r.rev(r.lookup(rev))
1250 startrev = r.rev(r.lookup(rev))
1242 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1251 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1243 if opts[b'alldata']:
1252 if opts[b'alldata']:
1244 # Load revisions associated with changeset.
1253 # Load revisions associated with changeset.
1245 ctx = repo[rev]
1254 ctx = repo[rev]
1246 mtext = _manifestrevision(repo, ctx.manifestnode())
1255 mtext = _manifestrevision(repo, ctx.manifestnode())
1247 for pctx in ctx.parents():
1256 for pctx in ctx.parents():
1248 pman = _manifestrevision(repo, pctx.manifestnode())
1257 pman = _manifestrevision(repo, pctx.manifestnode())
1249 textpairs.append((pman, mtext))
1258 textpairs.append((pman, mtext))
1250
1259
1251 # Load filelog revisions by iterating manifest delta.
1260 # Load filelog revisions by iterating manifest delta.
1252 man = ctx.manifest()
1261 man = ctx.manifest()
1253 pman = ctx.p1().manifest()
1262 pman = ctx.p1().manifest()
1254 for filename, change in pman.diff(man).items():
1263 for filename, change in pman.diff(man).items():
1255 fctx = repo.file(filename)
1264 fctx = repo.file(filename)
1256 f1 = fctx.revision(change[0][0] or -1)
1265 f1 = fctx.revision(change[0][0] or -1)
1257 f2 = fctx.revision(change[1][0] or -1)
1266 f2 = fctx.revision(change[1][0] or -1)
1258 textpairs.append((f1, f2))
1267 textpairs.append((f1, f2))
1259 else:
1268 else:
1260 dp = r.deltaparent(rev)
1269 dp = r.deltaparent(rev)
1261 textpairs.append((r.revision(dp), r.revision(rev)))
1270 textpairs.append((r.revision(dp), r.revision(rev)))
1262
1271
1263 withthreads = threads > 0
1272 withthreads = threads > 0
1264 if not withthreads:
1273 if not withthreads:
1265 def d():
1274 def d():
1266 for pair in textpairs:
1275 for pair in textpairs:
1267 if xdiff:
1276 if xdiff:
1268 mdiff.bdiff.xdiffblocks(*pair)
1277 mdiff.bdiff.xdiffblocks(*pair)
1269 elif blocks:
1278 elif blocks:
1270 mdiff.bdiff.blocks(*pair)
1279 mdiff.bdiff.blocks(*pair)
1271 else:
1280 else:
1272 mdiff.textdiff(*pair)
1281 mdiff.textdiff(*pair)
1273 else:
1282 else:
1274 q = queue()
1283 q = queue()
1275 for i in _xrange(threads):
1284 for i in _xrange(threads):
1276 q.put(None)
1285 q.put(None)
1277 ready = threading.Condition()
1286 ready = threading.Condition()
1278 done = threading.Event()
1287 done = threading.Event()
1279 for i in _xrange(threads):
1288 for i in _xrange(threads):
1280 threading.Thread(target=_bdiffworker,
1289 threading.Thread(target=_bdiffworker,
1281 args=(q, blocks, xdiff, ready, done)).start()
1290 args=(q, blocks, xdiff, ready, done)).start()
1282 q.join()
1291 q.join()
1283 def d():
1292 def d():
1284 for pair in textpairs:
1293 for pair in textpairs:
1285 q.put(pair)
1294 q.put(pair)
1286 for i in _xrange(threads):
1295 for i in _xrange(threads):
1287 q.put(None)
1296 q.put(None)
1288 with ready:
1297 with ready:
1289 ready.notify_all()
1298 ready.notify_all()
1290 q.join()
1299 q.join()
1291 timer, fm = gettimer(ui, opts)
1300 timer, fm = gettimer(ui, opts)
1292 timer(d)
1301 timer(d)
1293 fm.end()
1302 fm.end()
1294
1303
1295 if withthreads:
1304 if withthreads:
1296 done.set()
1305 done.set()
1297 for i in _xrange(threads):
1306 for i in _xrange(threads):
1298 q.put(None)
1307 q.put(None)
1299 with ready:
1308 with ready:
1300 ready.notify_all()
1309 ready.notify_all()
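# NOTE (editorial, not part of this changeset): hypothetical invocations of
# perfbdiff; --xdiff is only accepted together with --blocks, and --threads
# hands the diffing to the worker threads set up above:
#
#   $ hg perfbdiff -c 10000 --count 100
#   $ hg perfbdiff -c 10000 --count 100 --threads 4 --blocks --xdiff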
1301
1310
1302 @command(b'perfunidiff', revlogopts + formatteropts + [
1311 @command(b'perfunidiff', revlogopts + formatteropts + [
1303 (b'', b'count', 1, b'number of revisions to test, starting at the specified revision'),
1312 (b'', b'count', 1, b'number of revisions to test, starting at the specified revision'),
1304 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1313 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1305 ], b'-c|-m|FILE REV')
1314 ], b'-c|-m|FILE REV')
1306 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1315 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1307 """benchmark a unified diff between revisions
1316 """benchmark a unified diff between revisions
1308
1317
1309 This doesn't include any copy tracing - it's just a unified diff
1318 This doesn't include any copy tracing - it's just a unified diff
1310 of the texts.
1319 of the texts.
1311
1320
1312 By default, benchmark a diff between the requested revision and its delta parent.
1321 By default, benchmark a diff between the requested revision and its delta parent.
1313
1322
1314 With ``--count``, benchmark diffs between delta parents and self for N
1323 With ``--count``, benchmark diffs between delta parents and self for N
1315 revisions starting at the specified revision.
1324 revisions starting at the specified revision.
1316
1325
1317 With ``--alldata``, assume the requested revision is a changeset and
1326 With ``--alldata``, assume the requested revision is a changeset and
1318 measure diffs for all changes related to that changeset (manifest
1327 measure diffs for all changes related to that changeset (manifest
1319 and filelogs).
1328 and filelogs).
1320 """
1329 """
1321 opts = _byteskwargs(opts)
1330 opts = _byteskwargs(opts)
1322 if opts[b'alldata']:
1331 if opts[b'alldata']:
1323 opts[b'changelog'] = True
1332 opts[b'changelog'] = True
1324
1333
1325 if opts.get(b'changelog') or opts.get(b'manifest'):
1334 if opts.get(b'changelog') or opts.get(b'manifest'):
1326 file_, rev = None, file_
1335 file_, rev = None, file_
1327 elif rev is None:
1336 elif rev is None:
1328 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1337 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1329
1338
1330 textpairs = []
1339 textpairs = []
1331
1340
1332 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1341 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1333
1342
1334 startrev = r.rev(r.lookup(rev))
1343 startrev = r.rev(r.lookup(rev))
1335 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1344 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1336 if opts[b'alldata']:
1345 if opts[b'alldata']:
1337 # Load revisions associated with changeset.
1346 # Load revisions associated with changeset.
1338 ctx = repo[rev]
1347 ctx = repo[rev]
1339 mtext = _manifestrevision(repo, ctx.manifestnode())
1348 mtext = _manifestrevision(repo, ctx.manifestnode())
1340 for pctx in ctx.parents():
1349 for pctx in ctx.parents():
1341 pman = _manifestrevision(repo, pctx.manifestnode())
1350 pman = _manifestrevision(repo, pctx.manifestnode())
1342 textpairs.append((pman, mtext))
1351 textpairs.append((pman, mtext))
1343
1352
1344 # Load filelog revisions by iterating manifest delta.
1353 # Load filelog revisions by iterating manifest delta.
1345 man = ctx.manifest()
1354 man = ctx.manifest()
1346 pman = ctx.p1().manifest()
1355 pman = ctx.p1().manifest()
1347 for filename, change in pman.diff(man).items():
1356 for filename, change in pman.diff(man).items():
1348 fctx = repo.file(filename)
1357 fctx = repo.file(filename)
1349 f1 = fctx.revision(change[0][0] or -1)
1358 f1 = fctx.revision(change[0][0] or -1)
1350 f2 = fctx.revision(change[1][0] or -1)
1359 f2 = fctx.revision(change[1][0] or -1)
1351 textpairs.append((f1, f2))
1360 textpairs.append((f1, f2))
1352 else:
1361 else:
1353 dp = r.deltaparent(rev)
1362 dp = r.deltaparent(rev)
1354 textpairs.append((r.revision(dp), r.revision(rev)))
1363 textpairs.append((r.revision(dp), r.revision(rev)))
1355
1364
1356 def d():
1365 def d():
1357 for left, right in textpairs:
1366 for left, right in textpairs:
1358 # The date strings don't matter, so we pass empty strings.
1367 # The date strings don't matter, so we pass empty strings.
1359 headerlines, hunks = mdiff.unidiff(
1368 headerlines, hunks = mdiff.unidiff(
1360 left, b'', right, b'', b'left', b'right', binary=False)
1369 left, b'', right, b'', b'left', b'right', binary=False)
1361 # consume iterators in roughly the way patch.py does
1370 # consume iterators in roughly the way patch.py does
1362 b'\n'.join(headerlines)
1371 b'\n'.join(headerlines)
1363 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1372 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1364 timer, fm = gettimer(ui, opts)
1373 timer, fm = gettimer(ui, opts)
1365 timer(d)
1374 timer(d)
1366 fm.end()
1375 fm.end()
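# NOTE (editorial, not part of this changeset): a hypothetical invocation of
# perfunidiff, timing unified diffs of 10 manifest revisions against their
# delta parents:
#
#   $ hg perfunidiff -m 5000 --count 10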
1367
1376
1368 @command(b'perfdiffwd', formatteropts)
1377 @command(b'perfdiffwd', formatteropts)
1369 def perfdiffwd(ui, repo, **opts):
1378 def perfdiffwd(ui, repo, **opts):
1370 """Profile diff of working directory changes"""
1379 """Profile diff of working directory changes"""
1371 opts = _byteskwargs(opts)
1380 opts = _byteskwargs(opts)
1372 timer, fm = gettimer(ui, opts)
1381 timer, fm = gettimer(ui, opts)
1373 options = {
1382 options = {
1374 b'w': b'ignore_all_space',
1383 b'w': b'ignore_all_space',
1375 b'b': b'ignore_space_change',
1384 b'b': b'ignore_space_change',
1376 b'B': b'ignore_blank_lines',
1385 b'B': b'ignore_blank_lines',
1377 }
1386 }
1378
1387
1379 for diffopt in (b'', b'w', b'b', b'B', b'wB'):
1388 for diffopt in (b'', b'w', b'b', b'B', b'wB'):
1380 opts = dict((options[c], b'1') for c in diffopt)
1389 opts = dict((options[c], b'1') for c in diffopt)
1381 def d():
1390 def d():
1382 ui.pushbuffer()
1391 ui.pushbuffer()
1383 commands.diff(ui, repo, **opts)
1392 commands.diff(ui, repo, **opts)
1384 ui.popbuffer()
1393 ui.popbuffer()
1385 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1394 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1386 timer(d, title)
1395 timer(d, title)
1387 fm.end()
1396 fm.end()
1388
1397
1389 @command(b'perfrevlogindex', revlogopts + formatteropts,
1398 @command(b'perfrevlogindex', revlogopts + formatteropts,
1390 b'-c|-m|FILE')
1399 b'-c|-m|FILE')
1391 def perfrevlogindex(ui, repo, file_=None, **opts):
1400 def perfrevlogindex(ui, repo, file_=None, **opts):
1392 """Benchmark operations against a revlog index.
1401 """Benchmark operations against a revlog index.
1393
1402
1394 This tests constructing a revlog instance, reading index data,
1403 This tests constructing a revlog instance, reading index data,
1395 parsing index data, and performing various operations related to
1404 parsing index data, and performing various operations related to
1396 index data.
1405 index data.
1397 """
1406 """
1398
1407
1399 opts = _byteskwargs(opts)
1408 opts = _byteskwargs(opts)
1400
1409
1401 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1410 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1402
1411
1403 opener = getattr(rl, 'opener') # trick linter
1412 opener = getattr(rl, 'opener') # trick linter
1404 indexfile = rl.indexfile
1413 indexfile = rl.indexfile
1405 data = opener.read(indexfile)
1414 data = opener.read(indexfile)
1406
1415
1407 header = struct.unpack(b'>I', data[0:4])[0]
1416 header = struct.unpack(b'>I', data[0:4])[0]
1408 version = header & 0xFFFF
1417 version = header & 0xFFFF
1409 if version == 1:
1418 if version == 1:
1410 revlogio = revlog.revlogio()
1419 revlogio = revlog.revlogio()
1411 inline = header & (1 << 16)
1420 inline = header & (1 << 16)
1412 else:
1421 else:
1413 raise error.Abort((b'unsupported revlog version: %d') % version)
1422 raise error.Abort((b'unsupported revlog version: %d') % version)
1414
1423
1415 rllen = len(rl)
1424 rllen = len(rl)
1416
1425
1417 node0 = rl.node(0)
1426 node0 = rl.node(0)
1418 node25 = rl.node(rllen // 4)
1427 node25 = rl.node(rllen // 4)
1419 node50 = rl.node(rllen // 2)
1428 node50 = rl.node(rllen // 2)
1420 node75 = rl.node(rllen // 4 * 3)
1429 node75 = rl.node(rllen // 4 * 3)
1421 node100 = rl.node(rllen - 1)
1430 node100 = rl.node(rllen - 1)
1422
1431
1423 allrevs = range(rllen)
1432 allrevs = range(rllen)
1424 allrevsrev = list(reversed(allrevs))
1433 allrevsrev = list(reversed(allrevs))
1425 allnodes = [rl.node(rev) for rev in range(rllen)]
1434 allnodes = [rl.node(rev) for rev in range(rllen)]
1426 allnodesrev = list(reversed(allnodes))
1435 allnodesrev = list(reversed(allnodes))
1427
1436
1428 def constructor():
1437 def constructor():
1429 revlog.revlog(opener, indexfile)
1438 revlog.revlog(opener, indexfile)
1430
1439
1431 def read():
1440 def read():
1432 with opener(indexfile) as fh:
1441 with opener(indexfile) as fh:
1433 fh.read()
1442 fh.read()
1434
1443
1435 def parseindex():
1444 def parseindex():
1436 revlogio.parseindex(data, inline)
1445 revlogio.parseindex(data, inline)
1437
1446
1438 def getentry(revornode):
1447 def getentry(revornode):
1439 index = revlogio.parseindex(data, inline)[0]
1448 index = revlogio.parseindex(data, inline)[0]
1440 index[revornode]
1449 index[revornode]
1441
1450
1442 def getentries(revs, count=1):
1451 def getentries(revs, count=1):
1443 index = revlogio.parseindex(data, inline)[0]
1452 index = revlogio.parseindex(data, inline)[0]
1444
1453
1445 for i in range(count):
1454 for i in range(count):
1446 for rev in revs:
1455 for rev in revs:
1447 index[rev]
1456 index[rev]
1448
1457
1449 def resolvenode(node):
1458 def resolvenode(node):
1450 nodemap = revlogio.parseindex(data, inline)[1]
1459 nodemap = revlogio.parseindex(data, inline)[1]
1451 # This only works for the C code.
1460 # This only works for the C code.
1452 if nodemap is None:
1461 if nodemap is None:
1453 return
1462 return
1454
1463
1455 try:
1464 try:
1456 nodemap[node]
1465 nodemap[node]
1457 except error.RevlogError:
1466 except error.RevlogError:
1458 pass
1467 pass
1459
1468
1460 def resolvenodes(nodes, count=1):
1469 def resolvenodes(nodes, count=1):
1461 nodemap = revlogio.parseindex(data, inline)[1]
1470 nodemap = revlogio.parseindex(data, inline)[1]
1462 if nodemap is None:
1471 if nodemap is None:
1463 return
1472 return
1464
1473
1465 for i in range(count):
1474 for i in range(count):
1466 for node in nodes:
1475 for node in nodes:
1467 try:
1476 try:
1468 nodemap[node]
1477 nodemap[node]
1469 except error.RevlogError:
1478 except error.RevlogError:
1470 pass
1479 pass
1471
1480
1472 benches = [
1481 benches = [
1473 (constructor, b'revlog constructor'),
1482 (constructor, b'revlog constructor'),
1474 (read, b'read'),
1483 (read, b'read'),
1475 (parseindex, b'create index object'),
1484 (parseindex, b'create index object'),
1476 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1485 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1477 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1486 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1478 (lambda: resolvenode(node0), b'look up node at rev 0'),
1487 (lambda: resolvenode(node0), b'look up node at rev 0'),
1479 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1488 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1480 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1489 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1481 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1490 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1482 (lambda: resolvenode(node100), b'look up node at tip'),
1491 (lambda: resolvenode(node100), b'look up node at tip'),
1483 # 2x variation is to measure caching impact.
1492 # 2x variation is to measure caching impact.
1484 (lambda: resolvenodes(allnodes),
1493 (lambda: resolvenodes(allnodes),
1485 b'look up all nodes (forward)'),
1494 b'look up all nodes (forward)'),
1486 (lambda: resolvenodes(allnodes, 2),
1495 (lambda: resolvenodes(allnodes, 2),
1487 b'look up all nodes 2x (forward)'),
1496 b'look up all nodes 2x (forward)'),
1488 (lambda: resolvenodes(allnodesrev),
1497 (lambda: resolvenodes(allnodesrev),
1489 b'look up all nodes (reverse)'),
1498 b'look up all nodes (reverse)'),
1490 (lambda: resolvenodes(allnodesrev, 2),
1499 (lambda: resolvenodes(allnodesrev, 2),
1491 b'look up all nodes 2x (reverse)'),
1500 b'look up all nodes 2x (reverse)'),
1492 (lambda: getentries(allrevs),
1501 (lambda: getentries(allrevs),
1493 b'retrieve all index entries (forward)'),
1502 b'retrieve all index entries (forward)'),
1494 (lambda: getentries(allrevs, 2),
1503 (lambda: getentries(allrevs, 2),
1495 b'retrieve all index entries 2x (forward)'),
1504 b'retrieve all index entries 2x (forward)'),
1496 (lambda: getentries(allrevsrev),
1505 (lambda: getentries(allrevsrev),
1497 b'retrieve all index entries (reverse)'),
1506 b'retrieve all index entries (reverse)'),
1498 (lambda: getentries(allrevsrev, 2),
1507 (lambda: getentries(allrevsrev, 2),
1499 b'retrieve all index entries 2x (reverse)'),
1508 b'retrieve all index entries 2x (reverse)'),
1500 ]
1509 ]
1501
1510
1502 for fn, title in benches:
1511 for fn, title in benches:
1503 timer, fm = gettimer(ui, opts)
1512 timer, fm = gettimer(ui, opts)
1504 timer(fn, title=title)
1513 timer(fn, title=title)
1505 fm.end()
1514 fm.end()
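# NOTE (editorial, not part of this changeset): hypothetical invocations of
# perfrevlogindex against the changelog and the manifest revlog:
#
#   $ hg perfrevlogindex -c
#   $ hg perfrevlogindex -m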
1506
1515
1507 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1516 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1508 [(b'd', b'dist', 100, b'distance between the revisions'),
1517 [(b'd', b'dist', 100, b'distance between the revisions'),
1509 (b's', b'startrev', 0, b'revision to start reading at'),
1518 (b's', b'startrev', 0, b'revision to start reading at'),
1510 (b'', b'reverse', False, b'read in reverse')],
1519 (b'', b'reverse', False, b'read in reverse')],
1511 b'-c|-m|FILE')
1520 b'-c|-m|FILE')
1512 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1521 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1513 **opts):
1522 **opts):
1514 """Benchmark reading a series of revisions from a revlog.
1523 """Benchmark reading a series of revisions from a revlog.
1515
1524
1516 By default, we read every ``-d/--dist``-th revision from revision 0 to the
1525 By default, we read every ``-d/--dist``-th revision from revision 0 to the
1517 tip of the specified revlog.
1526 tip of the specified revlog.
1518
1527
1519 The start revision can be defined via ``-s/--startrev``.
1528 The start revision can be defined via ``-s/--startrev``.
1520 """
1529 """
1521 opts = _byteskwargs(opts)
1530 opts = _byteskwargs(opts)
1522
1531
1523 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1532 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1524 rllen = getlen(ui)(rl)
1533 rllen = getlen(ui)(rl)
1525
1534
1526 if startrev < 0:
1535 if startrev < 0:
1527 startrev = rllen + startrev
1536 startrev = rllen + startrev
1528
1537
1529 def d():
1538 def d():
1530 rl.clearcaches()
1539 rl.clearcaches()
1531
1540
1532 beginrev = startrev
1541 beginrev = startrev
1533 endrev = rllen
1542 endrev = rllen
1534 dist = opts[b'dist']
1543 dist = opts[b'dist']
1535
1544
1536 if reverse:
1545 if reverse:
1537 beginrev, endrev = endrev, beginrev
1546 beginrev, endrev = endrev, beginrev
1538 dist = -1 * dist
1547 dist = -1 * dist
1539
1548
1540 for x in _xrange(beginrev, endrev, dist):
1549 for x in _xrange(beginrev, endrev, dist):
1541 # Old revisions don't support passing int.
1550 # Old revisions don't support passing int.
1542 n = rl.node(x)
1551 n = rl.node(x)
1543 rl.revision(n)
1552 rl.revision(n)
1544
1553
1545 timer, fm = gettimer(ui, opts)
1554 timer, fm = gettimer(ui, opts)
1546 timer(d)
1555 timer(d)
1547 fm.end()
1556 fm.end()
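# NOTE (editorial, not part of this changeset): a hypothetical invocation reading
# every 100th manifest revision, and another reading the changelog backwards:
#
#   $ hg perfrevlogrevisions -m --dist 100
#   $ hg perfrevlogrevisions -c --reverse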
1548
1557
1549 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1558 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1550 [(b'e', b'engines', b'', b'compression engines to use'),
1559 [(b'e', b'engines', b'', b'compression engines to use'),
1551 (b's', b'startrev', 0, b'revision to start at')],
1560 (b's', b'startrev', 0, b'revision to start at')],
1552 b'-c|-m|FILE')
1561 b'-c|-m|FILE')
1553 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1562 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1554 """Benchmark operations on revlog chunks.
1563 """Benchmark operations on revlog chunks.
1555
1564
1556 Logically, each revlog is a collection of fulltext revisions. However,
1565 Logically, each revlog is a collection of fulltext revisions. However,
1557 stored within each revlog are "chunks" of possibly compressed data. This
1566 stored within each revlog are "chunks" of possibly compressed data. This
1558 data needs to be read and decompressed or compressed and written.
1567 data needs to be read and decompressed or compressed and written.
1559
1568
1560 This command measures the time it takes to read+decompress and recompress
1569 This command measures the time it takes to read+decompress and recompress
1561 chunks in a revlog. It effectively isolates I/O and compression performance.
1570 chunks in a revlog. It effectively isolates I/O and compression performance.
1562 For measurements of higher-level operations like resolving revisions,
1571 For measurements of higher-level operations like resolving revisions,
1563 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1572 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1564 """
1573 """
1565 opts = _byteskwargs(opts)
1574 opts = _byteskwargs(opts)
1566
1575
1567 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1576 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1568
1577
1569 # _chunkraw was renamed to _getsegmentforrevs.
1578 # _chunkraw was renamed to _getsegmentforrevs.
1570 try:
1579 try:
1571 segmentforrevs = rl._getsegmentforrevs
1580 segmentforrevs = rl._getsegmentforrevs
1572 except AttributeError:
1581 except AttributeError:
1573 segmentforrevs = rl._chunkraw
1582 segmentforrevs = rl._chunkraw
1574
1583
1575 # Verify engines argument.
1584 # Verify engines argument.
1576 if engines:
1585 if engines:
1577 engines = set(e.strip() for e in engines.split(b','))
1586 engines = set(e.strip() for e in engines.split(b','))
1578 for engine in engines:
1587 for engine in engines:
1579 try:
1588 try:
1580 util.compressionengines[engine]
1589 util.compressionengines[engine]
1581 except KeyError:
1590 except KeyError:
1582 raise error.Abort(b'unknown compression engine: %s' % engine)
1591 raise error.Abort(b'unknown compression engine: %s' % engine)
1583 else:
1592 else:
1584 engines = []
1593 engines = []
1585 for e in util.compengines:
1594 for e in util.compengines:
1586 engine = util.compengines[e]
1595 engine = util.compengines[e]
1587 try:
1596 try:
1588 if engine.available():
1597 if engine.available():
1589 engine.revlogcompressor().compress(b'dummy')
1598 engine.revlogcompressor().compress(b'dummy')
1590 engines.append(e)
1599 engines.append(e)
1591 except NotImplementedError:
1600 except NotImplementedError:
1592 pass
1601 pass
1593
1602
1594 revs = list(rl.revs(startrev, len(rl) - 1))
1603 revs = list(rl.revs(startrev, len(rl) - 1))
1595
1604
1596 def rlfh(rl):
1605 def rlfh(rl):
1597 if rl._inline:
1606 if rl._inline:
1598 return getsvfs(repo)(rl.indexfile)
1607 return getsvfs(repo)(rl.indexfile)
1599 else:
1608 else:
1600 return getsvfs(repo)(rl.datafile)
1609 return getsvfs(repo)(rl.datafile)
1601
1610
1602 def doread():
1611 def doread():
1603 rl.clearcaches()
1612 rl.clearcaches()
1604 for rev in revs:
1613 for rev in revs:
1605 segmentforrevs(rev, rev)
1614 segmentforrevs(rev, rev)
1606
1615
1607 def doreadcachedfh():
1616 def doreadcachedfh():
1608 rl.clearcaches()
1617 rl.clearcaches()
1609 fh = rlfh(rl)
1618 fh = rlfh(rl)
1610 for rev in revs:
1619 for rev in revs:
1611 segmentforrevs(rev, rev, df=fh)
1620 segmentforrevs(rev, rev, df=fh)
1612
1621
1613 def doreadbatch():
1622 def doreadbatch():
1614 rl.clearcaches()
1623 rl.clearcaches()
1615 segmentforrevs(revs[0], revs[-1])
1624 segmentforrevs(revs[0], revs[-1])
1616
1625
1617 def doreadbatchcachedfh():
1626 def doreadbatchcachedfh():
1618 rl.clearcaches()
1627 rl.clearcaches()
1619 fh = rlfh(rl)
1628 fh = rlfh(rl)
1620 segmentforrevs(revs[0], revs[-1], df=fh)
1629 segmentforrevs(revs[0], revs[-1], df=fh)
1621
1630
1622 def dochunk():
1631 def dochunk():
1623 rl.clearcaches()
1632 rl.clearcaches()
1624 fh = rlfh(rl)
1633 fh = rlfh(rl)
1625 for rev in revs:
1634 for rev in revs:
1626 rl._chunk(rev, df=fh)
1635 rl._chunk(rev, df=fh)
1627
1636
1628 chunks = [None]
1637 chunks = [None]
1629
1638
1630 def dochunkbatch():
1639 def dochunkbatch():
1631 rl.clearcaches()
1640 rl.clearcaches()
1632 fh = rlfh(rl)
1641 fh = rlfh(rl)
1633 # Save chunks as a side-effect.
1642 # Save chunks as a side-effect.
1634 chunks[0] = rl._chunks(revs, df=fh)
1643 chunks[0] = rl._chunks(revs, df=fh)
1635
1644
1636 def docompress(compressor):
1645 def docompress(compressor):
1637 rl.clearcaches()
1646 rl.clearcaches()
1638
1647
1639 try:
1648 try:
1640 # Swap in the requested compression engine.
1649 # Swap in the requested compression engine.
1641 oldcompressor = rl._compressor
1650 oldcompressor = rl._compressor
1642 rl._compressor = compressor
1651 rl._compressor = compressor
1643 for chunk in chunks[0]:
1652 for chunk in chunks[0]:
1644 rl.compress(chunk)
1653 rl.compress(chunk)
1645 finally:
1654 finally:
1646 rl._compressor = oldcompressor
1655 rl._compressor = oldcompressor
1647
1656
1648 benches = [
1657 benches = [
1649 (lambda: doread(), b'read'),
1658 (lambda: doread(), b'read'),
1650 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1659 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1651 (lambda: doreadbatch(), b'read batch'),
1660 (lambda: doreadbatch(), b'read batch'),
1652 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1661 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1653 (lambda: dochunk(), b'chunk'),
1662 (lambda: dochunk(), b'chunk'),
1654 (lambda: dochunkbatch(), b'chunk batch'),
1663 (lambda: dochunkbatch(), b'chunk batch'),
1655 ]
1664 ]
1656
1665
1657 for engine in sorted(engines):
1666 for engine in sorted(engines):
1658 compressor = util.compengines[engine].revlogcompressor()
1667 compressor = util.compengines[engine].revlogcompressor()
1659 benches.append((functools.partial(docompress, compressor),
1668 benches.append((functools.partial(docompress, compressor),
1660 b'compress w/ %s' % engine))
1669 b'compress w/ %s' % engine))
1661
1670
1662 for fn, title in benches:
1671 for fn, title in benches:
1663 timer, fm = gettimer(ui, opts)
1672 timer, fm = gettimer(ui, opts)
1664 timer(fn, title=title)
1673 timer(fn, title=title)
1665 fm.end()
1674 fm.end()
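# NOTE (editorial, not part of this changeset): hypothetical invocations of
# perfrevlogchunks; -e/--engines restricts the compression benchmarks to the
# named engines (zlib is assumed to be available here):
#
#   $ hg perfrevlogchunks -c
#   $ hg perfrevlogchunks -c -e zlib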
1666
1675
1667 @command(b'perfrevlogrevision', revlogopts + formatteropts +
1676 @command(b'perfrevlogrevision', revlogopts + formatteropts +
1668 [(b'', b'cache', False, b'use caches instead of clearing')],
1677 [(b'', b'cache', False, b'use caches instead of clearing')],
1669 b'-c|-m|FILE REV')
1678 b'-c|-m|FILE REV')
1670 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1679 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1671 """Benchmark obtaining a revlog revision.
1680 """Benchmark obtaining a revlog revision.
1672
1681
1673 Obtaining a revlog revision consists of roughly the following steps:
1682 Obtaining a revlog revision consists of roughly the following steps:
1674
1683
1675 1. Compute the delta chain
1684 1. Compute the delta chain
1676 2. Obtain the raw chunks for that delta chain
1685 2. Obtain the raw chunks for that delta chain
1677 3. Decompress each raw chunk
1686 3. Decompress each raw chunk
1678 4. Apply binary patches to obtain fulltext
1687 4. Apply binary patches to obtain fulltext
1679 5. Verify hash of fulltext
1688 5. Verify hash of fulltext
1680
1689
1681 This command measures the time spent in each of these phases.
1690 This command measures the time spent in each of these phases.
1682 """
1691 """
1683 opts = _byteskwargs(opts)
1692 opts = _byteskwargs(opts)
1684
1693
1685 if opts.get(b'changelog') or opts.get(b'manifest'):
1694 if opts.get(b'changelog') or opts.get(b'manifest'):
1686 file_, rev = None, file_
1695 file_, rev = None, file_
1687 elif rev is None:
1696 elif rev is None:
1688 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
1697 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
1689
1698
1690 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
1699 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
1691
1700
1692 # _chunkraw was renamed to _getsegmentforrevs.
1701 # _chunkraw was renamed to _getsegmentforrevs.
1693 try:
1702 try:
1694 segmentforrevs = r._getsegmentforrevs
1703 segmentforrevs = r._getsegmentforrevs
1695 except AttributeError:
1704 except AttributeError:
1696 segmentforrevs = r._chunkraw
1705 segmentforrevs = r._chunkraw
1697
1706
1698 node = r.lookup(rev)
1707 node = r.lookup(rev)
1699 rev = r.rev(node)
1708 rev = r.rev(node)
1700
1709
1701 def getrawchunks(data, chain):
1710 def getrawchunks(data, chain):
1702 start = r.start
1711 start = r.start
1703 length = r.length
1712 length = r.length
1704 inline = r._inline
1713 inline = r._inline
1705 iosize = r._io.size
1714 iosize = r._io.size
1706 buffer = util.buffer
1715 buffer = util.buffer
1707 offset = start(chain[0])
1716 offset = start(chain[0])
1708
1717
1709 chunks = []
1718 chunks = []
1710 ladd = chunks.append
1719 ladd = chunks.append
1711
1720
1712 for rev in chain:
1721 for rev in chain:
1713 chunkstart = start(rev)
1722 chunkstart = start(rev)
1714 if inline:
1723 if inline:
1715 chunkstart += (rev + 1) * iosize
1724 chunkstart += (rev + 1) * iosize
1716 chunklength = length(rev)
1725 chunklength = length(rev)
1717 ladd(buffer(data, chunkstart - offset, chunklength))
1726 ladd(buffer(data, chunkstart - offset, chunklength))
1718
1727
1719 return chunks
1728 return chunks
1720
1729
1721 def dodeltachain(rev):
1730 def dodeltachain(rev):
1722 if not cache:
1731 if not cache:
1723 r.clearcaches()
1732 r.clearcaches()
1724 r._deltachain(rev)
1733 r._deltachain(rev)
1725
1734
1726 def doread(chain):
1735 def doread(chain):
1727 if not cache:
1736 if not cache:
1728 r.clearcaches()
1737 r.clearcaches()
1729 segmentforrevs(chain[0], chain[-1])
1738 segmentforrevs(chain[0], chain[-1])
1730
1739
1731 def dorawchunks(data, chain):
1740 def dorawchunks(data, chain):
1732 if not cache:
1741 if not cache:
1733 r.clearcaches()
1742 r.clearcaches()
1734 getrawchunks(data, chain)
1743 getrawchunks(data, chain)
1735
1744
1736 def dodecompress(chunks):
1745 def dodecompress(chunks):
1737 decomp = r.decompress
1746 decomp = r.decompress
1738 for chunk in chunks:
1747 for chunk in chunks:
1739 decomp(chunk)
1748 decomp(chunk)
1740
1749
1741 def dopatch(text, bins):
1750 def dopatch(text, bins):
1742 if not cache:
1751 if not cache:
1743 r.clearcaches()
1752 r.clearcaches()
1744 mdiff.patches(text, bins)
1753 mdiff.patches(text, bins)
1745
1754
1746 def dohash(text):
1755 def dohash(text):
1747 if not cache:
1756 if not cache:
1748 r.clearcaches()
1757 r.clearcaches()
1749 r.checkhash(text, node, rev=rev)
1758 r.checkhash(text, node, rev=rev)
1750
1759
1751 def dorevision():
1760 def dorevision():
1752 if not cache:
1761 if not cache:
1753 r.clearcaches()
1762 r.clearcaches()
1754 r.revision(node)
1763 r.revision(node)
1755
1764
1756 chain = r._deltachain(rev)[0]
1765 chain = r._deltachain(rev)[0]
1757 data = segmentforrevs(chain[0], chain[-1])[1]
1766 data = segmentforrevs(chain[0], chain[-1])[1]
1758 rawchunks = getrawchunks(data, chain)
1767 rawchunks = getrawchunks(data, chain)
1759 bins = r._chunks(chain)
1768 bins = r._chunks(chain)
1760 text = str(bins[0])
1769 text = str(bins[0])
1761 bins = bins[1:]
1770 bins = bins[1:]
1762 text = mdiff.patches(text, bins)
1771 text = mdiff.patches(text, bins)
1763
1772
1764 benches = [
1773 benches = [
1765 (lambda: dorevision(), b'full'),
1774 (lambda: dorevision(), b'full'),
1766 (lambda: dodeltachain(rev), b'deltachain'),
1775 (lambda: dodeltachain(rev), b'deltachain'),
1767 (lambda: doread(chain), b'read'),
1776 (lambda: doread(chain), b'read'),
1768 (lambda: dorawchunks(data, chain), b'rawchunks'),
1777 (lambda: dorawchunks(data, chain), b'rawchunks'),
1769 (lambda: dodecompress(rawchunks), b'decompress'),
1778 (lambda: dodecompress(rawchunks), b'decompress'),
1770 (lambda: dopatch(text, bins), b'patch'),
1779 (lambda: dopatch(text, bins), b'patch'),
1771 (lambda: dohash(text), b'hash'),
1780 (lambda: dohash(text), b'hash'),
1772 ]
1781 ]
1773
1782
1774 for fn, title in benches:
1783 for fn, title in benches:
1775 timer, fm = gettimer(ui, opts)
1784 timer, fm = gettimer(ui, opts)
1776 timer(fn, title=title)
1785 timer(fn, title=title)
1777 fm.end()
1786 fm.end()
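# NOTE (editorial, not part of this changeset): a hypothetical invocation timing
# each phase listed in the docstring (deltachain, read, rawchunks, decompress,
# patch, hash) for one manifest revision, with and without warm caches:
#
#   $ hg perfrevlogrevision -m 50000
#   $ hg perfrevlogrevision -m 50000 --cache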
1778
1787
1779 @command(b'perfrevset',
1788 @command(b'perfrevset',
1780 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
1789 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
1781 (b'', b'contexts', False, b'obtain changectx for each revision')]
1790 (b'', b'contexts', False, b'obtain changectx for each revision')]
1782 + formatteropts, b"REVSET")
1791 + formatteropts, b"REVSET")
1783 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
1792 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
1784 """benchmark the execution time of a revset
1793 """benchmark the execution time of a revset
1785
1794
1786 Use the --clear option if you need to evaluate the impact of building the
1795 Use the --clear option if you need to evaluate the impact of building the
1787 volatile revision set caches on revset execution. The volatile caches hold
1796 volatile revision set caches on revset execution. The volatile caches hold
1788 data related to filtered and obsolete revisions."""
1797 data related to filtered and obsolete revisions."""
1789 opts = _byteskwargs(opts)
1798 opts = _byteskwargs(opts)
1790
1799
1791 timer, fm = gettimer(ui, opts)
1800 timer, fm = gettimer(ui, opts)
1792 def d():
1801 def d():
1793 if clear:
1802 if clear:
1794 repo.invalidatevolatilesets()
1803 repo.invalidatevolatilesets()
1795 if contexts:
1804 if contexts:
1796 for ctx in repo.set(expr): pass
1805 for ctx in repo.set(expr): pass
1797 else:
1806 else:
1798 for r in repo.revs(expr): pass
1807 for r in repo.revs(expr): pass
1799 timer(d)
1808 timer(d)
1800 fm.end()
1809 fm.end()
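# NOTE (editorial, not part of this changeset): hypothetical invocations of
# perfrevset; --clear drops the volatile sets between runs and --contexts
# builds a changectx for every matched revision:
#
#   $ hg perfrevset 'draft() and ancestors(tip)'
#   $ hg perfrevset --clear --contexts 'all()'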
1801
1810
1802 @command(b'perfvolatilesets',
1811 @command(b'perfvolatilesets',
1803 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
1812 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
1804 ] + formatteropts)
1813 ] + formatteropts)
1805 def perfvolatilesets(ui, repo, *names, **opts):
1814 def perfvolatilesets(ui, repo, *names, **opts):
1806 """benchmark the computation of various volatile sets
1815 """benchmark the computation of various volatile sets
1807
1816
1808 Volatile sets hold the elements related to filtering and obsolescence."""
1817 Volatile sets hold the elements related to filtering and obsolescence."""
1809 opts = _byteskwargs(opts)
1818 opts = _byteskwargs(opts)
1810 timer, fm = gettimer(ui, opts)
1819 timer, fm = gettimer(ui, opts)
1811 repo = repo.unfiltered()
1820 repo = repo.unfiltered()
1812
1821
1813 def getobs(name):
1822 def getobs(name):
1814 def d():
1823 def d():
1815 repo.invalidatevolatilesets()
1824 repo.invalidatevolatilesets()
1816 if opts[b'clear_obsstore']:
1825 if opts[b'clear_obsstore']:
1817 clearfilecache(repo, b'obsstore')
1826 clearfilecache(repo, b'obsstore')
1818 obsolete.getrevs(repo, name)
1827 obsolete.getrevs(repo, name)
1819 return d
1828 return d
1820
1829
1821 allobs = sorted(obsolete.cachefuncs)
1830 allobs = sorted(obsolete.cachefuncs)
1822 if names:
1831 if names:
1823 allobs = [n for n in allobs if n in names]
1832 allobs = [n for n in allobs if n in names]
1824
1833
1825 for name in allobs:
1834 for name in allobs:
1826 timer(getobs(name), title=name)
1835 timer(getobs(name), title=name)
1827
1836
1828 def getfiltered(name):
1837 def getfiltered(name):
1829 def d():
1838 def d():
1830 repo.invalidatevolatilesets()
1839 repo.invalidatevolatilesets()
1831 if opts[b'clear_obsstore']:
1840 if opts[b'clear_obsstore']:
1832 clearfilecache(repo, b'obsstore')
1841 clearfilecache(repo, b'obsstore')
1833 repoview.filterrevs(repo, name)
1842 repoview.filterrevs(repo, name)
1834 return d
1843 return d
1835
1844
1836 allfilter = sorted(repoview.filtertable)
1845 allfilter = sorted(repoview.filtertable)
1837 if names:
1846 if names:
1838 allfilter = [n for n in allfilter if n in names]
1847 allfilter = [n for n in allfilter if n in names]
1839
1848
1840 for name in allfilter:
1849 for name in allfilter:
1841 timer(getfiltered(name), title=name)
1850 timer(getfiltered(name), title=name)
1842 fm.end()
1851 fm.end()
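# NOTE (editorial, not part of this changeset): hypothetical invocations of
# perfvolatilesets; the positional names are matched against the known
# obsolescence sets and repoview filters, e.g. 'obsolete' or 'visible':
#
#   $ hg perfvolatilesets
#   $ hg perfvolatilesets obsolete --clear-obsstore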
1843
1852
1844 @command(b'perfbranchmap',
1853 @command(b'perfbranchmap',
1845 [(b'f', b'full', False,
1854 [(b'f', b'full', False,
1846 b'include the build time of subsets'),
1855 b'include the build time of subsets'),
1847 (b'', b'clear-revbranch', False,
1856 (b'', b'clear-revbranch', False,
1848 b'purge the revbranch cache between computations'),
1857 b'purge the revbranch cache between computations'),
1849 ] + formatteropts)
1858 ] + formatteropts)
1850 def perfbranchmap(ui, repo, *filternames, **opts):
1859 def perfbranchmap(ui, repo, *filternames, **opts):
1851 """benchmark the update of a branchmap
1860 """benchmark the update of a branchmap
1852
1861
1853 This benchmarks the full repo.branchmap() call with read and write disabled
1862 This benchmarks the full repo.branchmap() call with read and write disabled
1854 """
1863 """
1855 opts = _byteskwargs(opts)
1864 opts = _byteskwargs(opts)
1856 full = opts.get(b"full", False)
1865 full = opts.get(b"full", False)
1857 clear_revbranch = opts.get(b"clear_revbranch", False)
1866 clear_revbranch = opts.get(b"clear_revbranch", False)
1858 timer, fm = gettimer(ui, opts)
1867 timer, fm = gettimer(ui, opts)
1859 def getbranchmap(filtername):
1868 def getbranchmap(filtername):
1860 """generate a benchmark function for the filtername"""
1869 """generate a benchmark function for the filtername"""
1861 if filtername is None:
1870 if filtername is None:
1862 view = repo
1871 view = repo
1863 else:
1872 else:
1864 view = repo.filtered(filtername)
1873 view = repo.filtered(filtername)
1865 def d():
1874 def d():
1866 if clear_revbranch:
1875 if clear_revbranch:
1867 repo.revbranchcache()._clear()
1876 repo.revbranchcache()._clear()
1868 if full:
1877 if full:
1869 view._branchcaches.clear()
1878 view._branchcaches.clear()
1870 else:
1879 else:
1871 view._branchcaches.pop(filtername, None)
1880 view._branchcaches.pop(filtername, None)
1872 view.branchmap()
1881 view.branchmap()
1873 return d
1882 return d
1874 # add filter in smaller subset to bigger subset
1883 # add filter in smaller subset to bigger subset
1875 possiblefilters = set(repoview.filtertable)
1884 possiblefilters = set(repoview.filtertable)
1876 if filternames:
1885 if filternames:
1877 possiblefilters &= set(filternames)
1886 possiblefilters &= set(filternames)
1878 subsettable = getbranchmapsubsettable()
1887 subsettable = getbranchmapsubsettable()
1879 allfilters = []
1888 allfilters = []
1880 while possiblefilters:
1889 while possiblefilters:
1881 for name in possiblefilters:
1890 for name in possiblefilters:
1882 subset = subsettable.get(name)
1891 subset = subsettable.get(name)
1883 if subset not in possiblefilters:
1892 if subset not in possiblefilters:
1884 break
1893 break
1885 else:
1894 else:
1886 assert False, b'subset cycle %s!' % possiblefilters
1895 assert False, b'subset cycle %s!' % possiblefilters
1887 allfilters.append(name)
1896 allfilters.append(name)
1888 possiblefilters.remove(name)
1897 possiblefilters.remove(name)
1889
1898
1890 # warm the cache
1899 # warm the cache
1891 if not full:
1900 if not full:
1892 for name in allfilters:
1901 for name in allfilters:
1893 repo.filtered(name).branchmap()
1902 repo.filtered(name).branchmap()
1894 if not filternames or b'unfiltered' in filternames:
1903 if not filternames or b'unfiltered' in filternames:
1895 # add unfiltered
1904 # add unfiltered
1896 allfilters.append(None)
1905 allfilters.append(None)
1897
1906
1898 branchcacheread = safeattrsetter(branchmap, b'read')
1907 branchcacheread = safeattrsetter(branchmap, b'read')
1899 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
1908 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
1900 branchcacheread.set(lambda repo: None)
1909 branchcacheread.set(lambda repo: None)
1901 branchcachewrite.set(lambda bc, repo: None)
1910 branchcachewrite.set(lambda bc, repo: None)
1902 try:
1911 try:
1903 for name in allfilters:
1912 for name in allfilters:
1904 printname = name
1913 printname = name
1905 if name is None:
1914 if name is None:
1906 printname = b'unfiltered'
1915 printname = b'unfiltered'
1907 timer(getbranchmap(name), title=str(printname))
1916 timer(getbranchmap(name), title=str(printname))
1908 finally:
1917 finally:
1909 branchcacheread.restore()
1918 branchcacheread.restore()
1910 branchcachewrite.restore()
1919 branchcachewrite.restore()
1911 fm.end()
1920 fm.end()
1912
1921
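# Illustrative invocations of perfbranchmap. Filter names such as 'visible'
# are assumptions about the default repoview.filtertable; --full and
# --clear-revbranch are defined by the command above:
#
#   $ hg perfbranchmap                    # incremental update for each filter
#   $ hg perfbranchmap --full visible     # rebuild the 'visible' map from scratch
#   $ hg perfbranchmap --clear-revbranch  # also drop the revbranch cache
#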
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     (b'', b'list', False, b'List branchmap filter caches'),
    ] + formatteropts)
def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if filter:
        repo = repoview.repoview(repo, filter)
    else:
        repo = repo.unfiltered()
    # try once without timer, the filter may not be cached
    if branchmap.read(repo) is None:
        raise error.Abort(b'No branchmap cached for %s repo'
                          % (filter or b'unfiltered'))
    timer, fm = gettimer(ui, opts)
    timer(lambda: branchmap.read(repo) and None)
    fm.end()

@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    timer(lambda: len(obsolete.obsstore(svfs)))
    fm.end()

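# Illustrative invocation of perfloadmarkers; the reported value is the number
# of obsolescence markers parsed from the store:
#
#   $ hg perfloadmarkers
#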
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

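# Illustrative invocations of perflrucachedict (the command is norepo, so it
# runs outside any repository; the parameter values are only examples):
#
#   $ hg perflrucachedict --size 4 --gets 10000 --sets 10000
#   $ hg perflrucachedict --costlimit 500 --mincost 1 --maxcost 100
#
# With --costlimit set, the cost-aware benchmarks (gets/inserts/mixed
# "w/ cost limit") run instead of the plain ones.
#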
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def write():
        for i in range(100000):
            ui.write((b'Testing write performance\n'))
    timer(write)
    fm.end()

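# Illustrative invocation of perfwrite; redirecting stdout is an assumption to
# keep terminal rendering speed from dominating the measurement:
#
#   $ hg perfwrite > /dev/null
#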
def uisetup(ui):
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, the '--dir' option for
        # openrevlog() should cause a failure, because it has only been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)