repoview: move subsettable in a dedicated module...
Author: marmoute
Changeset: r42309:890f450f (branch: default, phase: draft)
@@ -0,0 +1,22 b''
# repoviewutil.py - contains data relevant to repoview.py and other modules
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# The ordering may be partial.
subsettable = {None: 'visible',
               'visible-hidden': 'visible',
               'visible': 'served',
               'served.hidden': 'served',
               'served': 'immutable',
               'immutable': 'base'}
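
As an aside, not part of the changeset: the "nearest subset" mapping can be
walked transitively to obtain the chain of progressively smaller subsets for
a given filter. A minimal sketch (`subsetchain` is a hypothetical helper):

    from mercurial.utils import repoviewutil

    def subsetchain(filtername):
        # follow the nearest-subset links until we fall off the table
        chain = []
        while filtername in repoviewutil.subsettable:
            filtername = repoviewutil.subsettable[filtername]
            chain.append(filtername)
        return chain

    # subsetchain('visible') -> ['served', 'immutable', 'base']
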
@@ -1,2858 +1,2863 b''
# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median, and average. If not set, only the best timing is reported
  (default: off).

``presleep``
  number of seconds to wait before any group of runs (default: 1)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic:

      If the benchmark has been running for <time> seconds, and we have
      performed <numberofrun> iterations, stop the benchmark.

  The default value is: `3.0-100, 10.0-3`

``stub``
  When set, benchmarks will only be run once; useful for testing
  (default: off)
'''
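
# [Editor's illustration, not part of this changeset] a `run-limits` value
# such as '3.0-100, 10.0-3' is a list of entries, each split on its first
# '-' into a float time limit and an int iteration count; a minimal sketch:
def _example_parse_run_limits(entries):
    # e.g. entries = ['3.0-100', '10.0-3']
    limits = []
    for item in entries:
        time_s, runs = item.split('-', 1)
        limits.append((float(time_s), int(runs)))
    return limits  # -> [(3.0, 100), (10.0, 3)]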

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide a range of Mercurial versions as
#   possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf commands work correctly with as wide a range of
#   Mercurial versions as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf commands for historical features work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf commands for recent features work correctly with early
#   Mercurial

from __future__ import absolute_import
import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
+    from mercurial.utils import repoviewutil # since 5.0
+except ImportError:
+    repoviewutil = None
+try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
except ImportError:
    pass


def identity(a):
    return a

try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
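
# [Editor's illustration, not part of this changeset] usage mirrors
# hasattr() but accepts a bytes attribute name (converted via _sysstr):
#
#     safehasattr(time, b'perf_counter')  # True on Python >= 3.3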

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
    ]))

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")
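
# [Editor's illustration, not part of this changeset] e.g.
#     parsealiases(b'perfstatus|perfst') -> [b'perfstatus', b'perfst']
# the first entry being the primary command name, the rest aliases.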

if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
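
# [Editor's illustration, not part of this changeset] whichever of the three
# definitions above is selected, it is used identically below, e.g.:
#
#     @command(b'perfexample', formatteropts)   # hypothetical command
#     def perfexample(ui, repo, **opts):
#         ...
#
# which registers the function in cmdtable under the name b'perfexample'.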

try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass

def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(pycompat.sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        try:
            run_limit = int(pycompat.sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    t = functools.partial(_timer, fm, displayall=displayall, limits=limits)
    return t, fm
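
# [Editor's illustration, not part of this changeset] the calling pattern
# shared by nearly every perf command below:
#
#     timer, fm = gettimer(ui, opts)
#     timer(lambda: work(), setup=prepare)   # work/prepare are hypothetical
#     fm.end()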

def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()

@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
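
# [Editor's illustration, not part of this changeset] timeone() yields a
# list that receives a single (wall, user, sys) tuple once the block exits:
#
#     with timeone() as item:
#         func()
#     wall, user, sys_times = item[0]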


# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)

def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)

def formatone(fm, timings, title=None, result=None, displayall=False):

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
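
# [Editor's illustration, not part of this changeset] output produced by
# display() looks like (timings invented):
#
#     ! wall 0.003245 comb 0.010000 user 0.010000 sys 0.000000 (best of 100)
#
# with perf.all-timing set, similar lines for max, avg, and median follow.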

# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function aborts if 'obj' doesn't have the 'name' attribute at
    runtime. This avoids overlooking future removal of an attribute, which
    would break assumptions of the performance measurement.

    This function returns an object used to (1) assign a new value to, and
    (2) restore the original value of, the attribute.

    If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
    an abort, and this function returns None. This is useful for examining
    an attribute that isn't ensured in all Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
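
# [Editor's illustration, not part of this changeset] typical use, as in
# gettimer() above:
#
#     uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
#     if uifout:
#         uifout.set(ui.ferr)    # redirect output
#         ...
#         uifout.restore()       # put the original value back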

# utilities to examine internal API changes

def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
-    for mod in (branchmap, repoview):
+    # - repoviewutil since 5.0
+    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
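
# [Editor's note, not part of this changeset] the lookup order above tries
# branchmap (2.9+), then repoview (2.5+), then the new repoviewutil (5.0+);
# the first module still exposing `subsettable` wins, which keeps this
# extension working across the move done by this changeset.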

def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')

def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')

def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # the correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such a case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))

# utilities to clear cache

def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)

def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')

# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()

@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()

@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()

@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()
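
# [Editor's illustration, not part of this changeset] perf commands run like
# any hg command once the extension is enabled (timings invented):
#
#     $ hg --config extensions.perf=contrib/perf.py perfheads
#     ! wall 0.001234 comb 0.000000 user 0.000000 sys 0.000000 (best of 873)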

@command(b'perftags', formatteropts+
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()

@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()

@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()

@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()

@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()

@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
822
818 @command(b'perfchangegroupchangelog', formatteropts +
823 @command(b'perfchangegroupchangelog', formatteropts +
819 [(b'', b'cgversion', b'02', b'changegroup version'),
824 [(b'', b'cgversion', b'02', b'changegroup version'),
820 (b'r', b'rev', b'', b'revisions to add to changegroup')])
825 (b'r', b'rev', b'', b'revisions to add to changegroup')])
821 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
826 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
822 """Benchmark producing a changelog group for a changegroup.
827 """Benchmark producing a changelog group for a changegroup.
823
828
824 This measures the time spent processing the changelog during a
829 This measures the time spent processing the changelog during a
825 bundle operation. This occurs during `hg bundle` and on a server
830 bundle operation. This occurs during `hg bundle` and on a server
826 processing a `getbundle` wire protocol request (handles clones
831 processing a `getbundle` wire protocol request (handles clones
827 and pull requests).
832 and pull requests).
828
833
829 By default, all revisions are added to the changegroup.
834 By default, all revisions are added to the changegroup.
830 """
835 """
831 opts = _byteskwargs(opts)
836 opts = _byteskwargs(opts)
832 cl = repo.changelog
837 cl = repo.changelog
833 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
838 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
834 bundler = changegroup.getbundler(cgversion, repo)
839 bundler = changegroup.getbundler(cgversion, repo)
835
840
836 def d():
841 def d():
837 state, chunks = bundler._generatechangelog(cl, nodes)
842 state, chunks = bundler._generatechangelog(cl, nodes)
838 for chunk in chunks:
843 for chunk in chunks:
839 pass
844 pass
840
845
841 timer, fm = gettimer(ui, opts)
846 timer, fm = gettimer(ui, opts)
842
847
843 # Terminal printing can interfere with timing. So disable it.
    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()

@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()

@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()

@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds
    def d():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()

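# Usage sketch (illustrative, not part of the original file): with perf.py
# enabled as an extension, e.g. `--config extensions.perf=path/to/perf.py`
# (the path being whatever your checkout uses), the dirstate benchmarks
# above run as plain commands inside any repository:
#   $ hg perfdirstate
#   $ hg perfdirstatedirs
#   $ hg perfdirstatewrite
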
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so
    # prime that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle
        # of our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()

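# Example invocation (illustrative; the revision is hypothetical):
#   $ hg perfmergecalculate -r 1000
# benchmarks the merge-action calculation between the working copy and
# revision 1000 without touching the working directory.
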
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()

@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()

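# Example invocation (illustrative): `hg perfphases` times phaseset
# computation from the in-memory phasecache, while `hg perfphases --full`
# also includes the time spent re-reading the phase data from disk.
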
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()

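# Example invocation (illustrative; the URL is hypothetical):
#   $ hg perfphasesremote https://example.com/some-repo
# With no DEST argument, the `default-push`/`default` path is used.
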
@command(b'perfmanifest', [
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()

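# Example invocations (illustrative):
#   $ hg perfmanifest tip                # resolve REV via the changelog
#   $ hg perfmanifest -m 0               # argument is a manifest revision
#   $ hg perfmanifest tip --clear-disk   # also drop on-disk manifest caches
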
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()

@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operations related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()

@command(b'perfindex', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'no-lookup', None,
     b'do not perform revision lookup after creation'),
] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than
    for `--rev 0`. The number of looked up revisions and their order can
    also matter.

    Examples of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, check out the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()

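# Example invocation (illustrative), exercising both ends of the index as
# suggested by the docstring above:
#   $ hg perfindex --rev tip --rev 0
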
@command(b'perfnodemap', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'clear-caches', True, b'clear revlog cache between calls'),
] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the number and order of revisions we
    look up can vary. Examples of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts['clear_caches']
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort('use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()

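# Example invocation (illustrative):
#   $ hg perfnodemap --rev tip --rev 0
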
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()

@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object
    layers from the repository object. The first N revisions will be used
    for this benchmark. N is controlled by the ``perf.parentscount`` config
    option (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()

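# Example invocation (illustrative): the iteration count can be lowered for
# small repositories via the experimental config knob read above:
#   $ hg perfparents --config perf.parentscount=100
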
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()

@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.read(x)[3])
    timer(d)
    fm.end()

@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()

@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

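# Example invocation (illustrative; norepo=True, so it runs anywhere):
#   $ hg perflinelogedits -n 1000
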
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()

@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()

@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
          ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()

@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch()  # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()

@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
          ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()

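# Example invocation (illustrative; the template is an arbitrary sample
# passed as the positional argument):
#   $ hg perftemplating -r 'last(all(), 1000)' '{rev}:{node|short}\n'
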
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
          ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for the `perftracecopies`
    command

    This command finds source-destination pairs relevant for copy tracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However, it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()

@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()

@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()

@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()

@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()

def _bdiffworker(q, blocks, xdiff, ready, done):
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()

def _manifestrevision(repo, mnode):
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)

@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ], b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()

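# Example invocations (illustrative; revision numbers are hypothetical):
#   $ hg perfbdiff -c 1000                    # one bdiff vs the delta parent
#   $ hg perfbdiff --alldata --count 10 1000  # manifest + filelog pairs
#   $ hg perfbdiff --blocks --xdiff -m 1000   # xdiff block computation
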
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()

1725 @command(b'perfrevlogindex', revlogopts + formatteropts,
1730 @command(b'perfrevlogindex', revlogopts + formatteropts,
1726 b'-c|-m|FILE')
1731 b'-c|-m|FILE')
1727 def perfrevlogindex(ui, repo, file_=None, **opts):
1732 def perfrevlogindex(ui, repo, file_=None, **opts):
1728 """Benchmark operations against a revlog index.
1733 """Benchmark operations against a revlog index.
1729
1734
1730 This tests constructing a revlog instance, reading index data,
1735 This tests constructing a revlog instance, reading index data,
1731 parsing index data, and performing various operations related to
1736 parsing index data, and performing various operations related to
1732 index data.
1737 index data.
1733 """
1738 """
1734
1739
1735 opts = _byteskwargs(opts)
1740 opts = _byteskwargs(opts)
1736
1741
1737 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1742 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1738
1743
1739 opener = getattr(rl, 'opener') # trick linter
1744 opener = getattr(rl, 'opener') # trick linter
1740 indexfile = rl.indexfile
1745 indexfile = rl.indexfile
1741 data = opener.read(indexfile)
1746 data = opener.read(indexfile)
1742
1747
1743 header = struct.unpack(b'>I', data[0:4])[0]
1748 header = struct.unpack(b'>I', data[0:4])[0]
1744 version = header & 0xFFFF
1749 version = header & 0xFFFF
1745 if version == 1:
1750 if version == 1:
1746 revlogio = revlog.revlogio()
1751 revlogio = revlog.revlogio()
1747 inline = header & (1 << 16)
1752 inline = header & (1 << 16)
1748 else:
1753 else:
1749 raise error.Abort((b'unsupported revlog version: %d') % version)
1754 raise error.Abort((b'unsupported revlog version: %d') % version)
1750
1755
1751 rllen = len(rl)
1756 rllen = len(rl)
1752
1757
1753 node0 = rl.node(0)
1758 node0 = rl.node(0)
1754 node25 = rl.node(rllen // 4)
1759 node25 = rl.node(rllen // 4)
1755 node50 = rl.node(rllen // 2)
1760 node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
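
# Note on the node-lookup benchmarks above (illustrative): b'a' * 20 is a
# well-formed 20-byte node that will almost certainly not exist, so the
# 'look up missing node' case exercises the full miss path of the nodemap
# (a radix tree in the C implementation). The "2x" variants repeat the same
# lookups, so the second pass runs against any lazily built cache and the
# difference between the "1x" and "2x" timings approximates the caching
# impact.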

@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
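
# Illustrative sketch of the revision sequence produced by d() above: with
# rllen == 10 and --dist 3, the forward walk reads revs 0, 3, 6, 9; with
# --reverse the bounds flip and the step is negated:
#
#   _xrange(0, 10, 3)   -> 0, 3, 6, 9
#   _xrange(9, -1, -3)  -> 9, 6, 3, 0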

@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revision tested'),
          (b'', b'source', b'full', b'the kind of data fed into the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sort results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
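
    # How the percentile indexes above land in the sorted `results` list
    # (illustrative): with resultcount == 200, ("min", 0) is the fastest
    # entry, ("50%", 200 * 50 // 100) is index 100, ("99%", 200 * 99 // 100)
    # is index 198, and ("max", -1) is the slowest. With small run counts
    # the integer division collapses several percentiles onto index 0.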

    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many floats will not be very precise, we ignore this
    # fact for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
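
# (Illustrative aside: should the naive float summation above ever matter,
# the standard library offers an exactly rounded alternative, e.g.
# math.fsum(x[1][0] for x in item).)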

class _faketr(object):
    def add(s, x, y, z=None):
        return None
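
# _faketr stands in for a real transaction: the revlog write path only uses
# the transaction to journal file offsets via tr.add(), and the benchmark
# never rolls back, so a no-op stub is sufficient here.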

def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings

def _getrevisionseed(orig, rev, tr, source):
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
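
# The (args, kwargs) pair returned above is applied in _timeonewrite as
# dest.addrawrevision(*addargs, **addkwargs), i.e. (illustrative):
#
#   dest.addrawrevision(text, tr, linkrev, p1, p2,
#                       node=node, flags=flags, cachedelta=cachedelta)
#
# For the delta-based sources, text stays None and cachedelta carries the
# (baserev, delta) pair that the revlog will apply or recompute.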

@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('instantiating revlog from the truncated copy\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
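
# Why the truncation math above works (illustrative): a non-inline revlog
# index is an array of fixed-size entries (orig._io.size bytes each, 64 for
# revlogv1), so the first `truncaterev` entries span exactly
# truncaterev * orig._io.size bytes, while orig.start(truncaterev) is the
# matching cut-off inside the data file.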

@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
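
# (Illustrative note: docompress() above swaps rl._compressor inside a
# try/finally so the revlog is always restored, and every engine compresses
# the exact same chunks captured by the 'chunk batch' run, which makes the
# per-engine timings directly comparable.)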

@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks
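
    # Inline offset math above (illustrative): in an inline revlog the data
    # for rev N is stored right after index entry N, so its absolute file
    # offset is start(N) plus the (N + 1) index entries preceding it, hence
    # chunkstart += (rev + 1) * iosize.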

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()

@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set caches on revset execution. The volatile caches hold
    filtered and obsolescence-related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            for ctx in repo.set(expr): pass
        else:
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()

@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
         ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets compute elements related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()

@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'include the build time of subsets'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computations'),
         ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)
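
    # Standalone sketch of the ordering loop above (illustrative, with a
    # made-up subsettable): a name is emitted only once its nearest subset
    # is no longer pending, e.g.
    #
    #   subsettable = {None: 'visible', 'visible': 'served'}
    #   possiblefilters = {'visible', 'served'}
    #
    # 'served' is picked first (it has no pending subset), then 'visible',
    # so the branchmaps are later built from the smallest view up.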

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()

@command(b'perfbranchmapupdate', [
     (b'', b'base', [], b'subset of revision to start from'),
     (b'', b'target', [], b'subset of revision to end with'),
     (b'', b'clear-caches', False, b'clear cache between each run')
    ] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

       # update for the last revision
       $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

       # update for change coming with a new branch
       $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closures

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
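
# (Illustrative note: the candidate search above walks the subsettable
# chain -- e.g. None -> 'visible' -> 'served' -> ... -- looking for the
# closest cached branchmap still valid for the base view, so the benchmark
# pays only for the incremental update actually being measured.)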

@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     (b'', b'list', False, b'List branchmap filter caches'),
     (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

    ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()

@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    timer(lambda: len(obsolete.obsstore(svfs)))
    fm.end()

@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
2757 r = random.randint(0, 100)
2762 r = random.randint(0, 100)
2758 if r < mixedgetfreq:
2763 if r < mixedgetfreq:
2759 op = 0
2764 op = 0
2760 else:
2765 else:
2761 op = 1
2766 op = 1
2762
2767
2763 mixedops.append((op,
2768 mixedops.append((op,
2764 random.randint(0, size * 2),
2769 random.randint(0, size * 2),
2765 random.choice(costrange)))
2770 random.choice(costrange)))
2766
2771
2767 def domixed():
2772 def domixed():
2768 d = util.lrucachedict(size)
2773 d = util.lrucachedict(size)
2769
2774
2770 for op, v, cost in mixedops:
2775 for op, v, cost in mixedops:
2771 if op == 0:
2776 if op == 0:
2772 try:
2777 try:
2773 d[v]
2778 d[v]
2774 except KeyError:
2779 except KeyError:
2775 pass
2780 pass
2776 else:
2781 else:
2777 d[v] = v
2782 d[v] = v
2778
2783
2779 def domixedcost():
2784 def domixedcost():
2780 d = util.lrucachedict(size, maxcost=costlimit)
2785 d = util.lrucachedict(size, maxcost=costlimit)
2781
2786
2782 for op, v, cost in mixedops:
2787 for op, v, cost in mixedops:
2783 if op == 0:
2788 if op == 0:
2784 try:
2789 try:
2785 d[v]
2790 d[v]
2786 except KeyError:
2791 except KeyError:
2787 pass
2792 pass
2788 else:
2793 else:
2789 d.insert(v, v, cost=cost)
2794 d.insert(v, v, cost=cost)
2790
2795
2791 benches = [
2796 benches = [
2792 (doinit, b'init'),
2797 (doinit, b'init'),
2793 ]
2798 ]
2794
2799
2795 if costlimit:
2800 if costlimit:
2796 benches.extend([
2801 benches.extend([
2797 (dogetscost, b'gets w/ cost limit'),
2802 (dogetscost, b'gets w/ cost limit'),
2798 (doinsertscost, b'inserts w/ cost limit'),
2803 (doinsertscost, b'inserts w/ cost limit'),
2799 (domixedcost, b'mixed w/ cost limit'),
2804 (domixedcost, b'mixed w/ cost limit'),
2800 ])
2805 ])
2801 else:
2806 else:
2802 benches.extend([
2807 benches.extend([
2803 (dogets, b'gets'),
2808 (dogets, b'gets'),
2804 (doinserts, b'inserts'),
2809 (doinserts, b'inserts'),
2805 (dosets, b'sets'),
2810 (dosets, b'sets'),
2806 (domixed, b'mixed')
2811 (domixed, b'mixed')
2807 ])
2812 ])
2808
2813
2809 for fn, title in benches:
2814 for fn, title in benches:
2810 timer, fm = gettimer(ui, opts)
2815 timer, fm = gettimer(ui, opts)
2811 timer(fn, title=title)
2816 timer(fn, title=title)
2812 fm.end()
2817 fm.end()
2813
2818
2814 @command(b'perfwrite', formatteropts)
2819 @command(b'perfwrite', formatteropts)
2815 def perfwrite(ui, repo, **opts):
2820 def perfwrite(ui, repo, **opts):
2816 """microbenchmark ui.write
2821 """microbenchmark ui.write
2817 """
2822 """
2818 opts = _byteskwargs(opts)
2823 opts = _byteskwargs(opts)
2819
2824
2820 timer, fm = gettimer(ui, opts)
2825 timer, fm = gettimer(ui, opts)
2821 def write():
2826 def write():
2822 for i in range(100000):
2827 for i in range(100000):
2823 ui.write((b'Testing write performance\n'))
2828 ui.write((b'Testing write performance\n'))
2824 timer(write)
2829 timer(write)
2825 fm.end()
2830 fm.end()
2826
2831
2827 def uisetup(ui):
2832 def uisetup(ui):
2828 if (util.safehasattr(cmdutil, b'openrevlog') and
2833 if (util.safehasattr(cmdutil, b'openrevlog') and
2829 not util.safehasattr(commands, b'debugrevlogopts')):
2834 not util.safehasattr(commands, b'debugrevlogopts')):
2830 # for "historical portability":
2835 # for "historical portability":
2831 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2836 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2832 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2837 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2833 # openrevlog() should cause failure, because it has been
2838 # openrevlog() should cause failure, because it has been
2834 # available since 3.5 (or 49c583ca48c4).
2839 # available since 3.5 (or 49c583ca48c4).
2835 def openrevlog(orig, repo, cmd, file_, opts):
2840 def openrevlog(orig, repo, cmd, file_, opts):
2836 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2841 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2837 raise error.Abort(b"This version doesn't support --dir option",
2842 raise error.Abort(b"This version doesn't support --dir option",
2838 hint=b"use 3.5 or later")
2843 hint=b"use 3.5 or later")
2839 return orig(repo, cmd, file_, opts)
2844 return orig(repo, cmd, file_, opts)
2840 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2845 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2841
2846
2842 @command(b'perfprogress', formatteropts + [
2847 @command(b'perfprogress', formatteropts + [
2843 (b'', b'topic', b'topic', b'topic for progress messages'),
2848 (b'', b'topic', b'topic', b'topic for progress messages'),
2844 (b'c', b'total', 1000000, b'total value we are progressing to'),
2849 (b'c', b'total', 1000000, b'total value we are progressing to'),
2845 ], norepo=True)
2850 ], norepo=True)
2846 def perfprogress(ui, topic=None, total=None, **opts):
2851 def perfprogress(ui, topic=None, total=None, **opts):
2847 """printing of progress bars"""
2852 """printing of progress bars"""
2848 opts = _byteskwargs(opts)
2853 opts = _byteskwargs(opts)
2849
2854
2850 timer, fm = gettimer(ui, opts)
2855 timer, fm = gettimer(ui, opts)
2851
2856
2852 def doprogress():
2857 def doprogress():
2853 with ui.makeprogress(topic, total=total) as progress:
2858 with ui.makeprogress(topic, total=total) as progress:
2854 for i in pycompat.xrange(total):
2859 for i in pycompat.xrange(total):
2855 progress.increment()
2860 progress.increment()
2856
2861
2857 timer(doprogress)
2862 timer(doprogress)
2858 fm.end()
2863 fm.end()
@@ -1,669 +1,658 b''
1 # branchmap.py - logic to compute, maintain and store the branchmap for a local repo
1 # branchmap.py - logic to compute, maintain and store the branchmap for a local repo
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11
11
12 from .node import (
12 from .node import (
13 bin,
13 bin,
14 hex,
14 hex,
15 nullid,
15 nullid,
16 nullrev,
16 nullrev,
17 )
17 )
18 from . import (
18 from . import (
19 encoding,
19 encoding,
20 error,
20 error,
21 pycompat,
21 pycompat,
22 scmutil,
22 scmutil,
23 util,
23 util,
24 )
24 )
25 from .utils import (
25 from .utils import (
26 repoviewutil,
26 stringutil,
27 stringutil,
27 )
28 )
28
29
30 subsettable = repoviewutil.subsettable
31
29 calcsize = struct.calcsize
32 calcsize = struct.calcsize
30 pack_into = struct.pack_into
33 pack_into = struct.pack_into
31 unpack_from = struct.unpack_from
34 unpack_from = struct.unpack_from
32
35
33
36
34 ### Nearest subset relation
35 # Nearest subset of filter X is a filter Y so that:
36 # * Y is included in X,
37 # * X - Y is as small as possible.
38 # This creates an ordering used for branchmap purposes.
39 # The ordering may be partial.
40 subsettable = {None: 'visible',
41 'visible-hidden': 'visible',
42 'visible': 'served',
43 'served.hidden': 'served',
44 'served': 'immutable',
45 'immutable': 'base'}
46
47
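The table being removed above (now provided by repoviewutil) encodes the
nearest-subset relation between repoview filter levels. As a quick
illustration, and only as a sketch, starting from any filter name and
repeatedly taking the nearest subset always bottoms out at 'base', which has
no entry of its own:

    def subsetchain(filtername, table):
        # Enumerate successive nearest subsets until no entry remains.
        chain = [filtername]
        while filtername in table:
            filtername = table[filtername]
            chain.append(filtername)
        return chain

    # With the subsettable shown above:
    # subsetchain(None, subsettable)
    # -> [None, 'visible', 'served', 'immutable', 'base']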
48 class BranchMapCache(object):
37 class BranchMapCache(object):
49 """mapping of filtered views of repo with their branchcache"""
38 """mapping of filtered views of repo with their branchcache"""
50 def __init__(self):
39 def __init__(self):
51 self._per_filter = {}
40 self._per_filter = {}
52
41
53 def __getitem__(self, repo):
42 def __getitem__(self, repo):
54 self.updatecache(repo)
43 self.updatecache(repo)
55 return self._per_filter[repo.filtername]
44 return self._per_filter[repo.filtername]
56
45
57 def updatecache(self, repo):
46 def updatecache(self, repo):
58 """Update the cache for the given filtered view on a repository"""
47 """Update the cache for the given filtered view on a repository"""
59 # This can trigger updates for the caches for subsets of the filtered
48 # This can trigger updates for the caches for subsets of the filtered
60 # view, e.g. when there is no cache for this filtered view or the cache
49 # view, e.g. when there is no cache for this filtered view or the cache
61 # is stale.
50 # is stale.
62
51
63 cl = repo.changelog
52 cl = repo.changelog
64 filtername = repo.filtername
53 filtername = repo.filtername
65 bcache = self._per_filter.get(filtername)
54 bcache = self._per_filter.get(filtername)
66 if bcache is None or not bcache.validfor(repo):
55 if bcache is None or not bcache.validfor(repo):
67 # cache object missing or cache object stale? Read from disk
56 # cache object missing or cache object stale? Read from disk
68 bcache = branchcache.fromfile(repo)
57 bcache = branchcache.fromfile(repo)
69
58
70 revs = []
59 revs = []
71 if bcache is None:
60 if bcache is None:
72 # no (fresh) cache available anymore, perhaps we can re-use
61 # no (fresh) cache available anymore, perhaps we can re-use
73 # the cache for a subset, then extend that to add info on missing
62 # the cache for a subset, then extend that to add info on missing
74 # revisions.
63 # revisions.
75 subsetname = subsettable.get(filtername)
64 subsetname = subsettable.get(filtername)
76 if subsetname is not None:
65 if subsetname is not None:
77 subset = repo.filtered(subsetname)
66 subset = repo.filtered(subsetname)
78 bcache = self[subset].copy()
67 bcache = self[subset].copy()
79 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
68 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
80 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
69 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
81 else:
70 else:
82 # nothing to fall back on, start empty.
71 # nothing to fall back on, start empty.
83 bcache = branchcache()
72 bcache = branchcache()
84
73
85 revs.extend(cl.revs(start=bcache.tiprev + 1))
74 revs.extend(cl.revs(start=bcache.tiprev + 1))
86 if revs:
75 if revs:
87 bcache.update(repo, revs)
76 bcache.update(repo, revs)
88
77
89 assert bcache.validfor(repo), filtername
78 assert bcache.validfor(repo), filtername
90 self._per_filter[repo.filtername] = bcache
79 self._per_filter[repo.filtername] = bcache
91
80
92 def replace(self, repo, remotebranchmap):
81 def replace(self, repo, remotebranchmap):
93 """Replace the branchmap cache for a repo with a branch mapping.
82 """Replace the branchmap cache for a repo with a branch mapping.
94
83
95 This is likely only called during clone with a branch map from a
84 This is likely only called during clone with a branch map from a
96 remote.
85 remote.
97
86
98 """
87 """
99 cl = repo.changelog
88 cl = repo.changelog
100 clrev = cl.rev
89 clrev = cl.rev
101 clbranchinfo = cl.branchinfo
90 clbranchinfo = cl.branchinfo
102 rbheads = []
91 rbheads = []
103 closed = []
92 closed = []
104 for bheads in remotebranchmap.itervalues():
93 for bheads in remotebranchmap.itervalues():
105 rbheads += bheads
94 rbheads += bheads
106 for h in bheads:
95 for h in bheads:
107 r = clrev(h)
96 r = clrev(h)
108 b, c = clbranchinfo(r)
97 b, c = clbranchinfo(r)
109 if c:
98 if c:
110 closed.append(h)
99 closed.append(h)
111
100
112 if rbheads:
101 if rbheads:
113 rtiprev = max((int(clrev(node)) for node in rbheads))
102 rtiprev = max((int(clrev(node)) for node in rbheads))
114 cache = branchcache(
103 cache = branchcache(
115 remotebranchmap, repo[rtiprev].node(), rtiprev,
104 remotebranchmap, repo[rtiprev].node(), rtiprev,
116 closednodes=closed)
105 closednodes=closed)
117
106
118 # Try to stick it as low as possible
107 # Try to stick it as low as possible
119 # filters above served are unlikely to be fetched from a clone
108 # filters above served are unlikely to be fetched from a clone
120 for candidate in ('base', 'immutable', 'served'):
109 for candidate in ('base', 'immutable', 'served'):
121 rview = repo.filtered(candidate)
110 rview = repo.filtered(candidate)
122 if cache.validfor(rview):
111 if cache.validfor(rview):
123 self._per_filter[candidate] = cache
112 self._per_filter[candidate] = cache
124 cache.write(rview)
113 cache.write(rview)
125 return
114 return
126
115
127 def clear(self):
116 def clear(self):
128 self._per_filter.clear()
117 self._per_filter.clear()
129
118
130 def _unknownnode(node):
119 def _unknownnode(node):
131 """ raises ValueError when branchcache found a node which does not exists
120 """ raises ValueError when branchcache found a node which does not exists
132 """
121 """
133 raise ValueError(r'node %s does not exist' % pycompat.sysstr(hex(node)))
122 raise ValueError(r'node %s does not exist' % pycompat.sysstr(hex(node)))
134
123
135 class branchcache(object):
124 class branchcache(object):
136 """A dict like object that hold branches heads cache.
125 """A dict like object that hold branches heads cache.
137
126
138 This cache is used to avoid costly computations to determine all the
127 This cache is used to avoid costly computations to determine all the
139 branch heads of a repo.
128 branch heads of a repo.
140
129
141 The cache is serialized on disk in the following format:
130 The cache is serialized on disk in the following format:
142
131
143 <tip hex node> <tip rev number> [optional filtered repo hex hash]
132 <tip hex node> <tip rev number> [optional filtered repo hex hash]
144 <branch head hex node> <open/closed state> <branch name>
133 <branch head hex node> <open/closed state> <branch name>
145 <branch head hex node> <open/closed state> <branch name>
134 <branch head hex node> <open/closed state> <branch name>
146 ...
135 ...
147
136
148 The first line is used to check if the cache is still valid. If the
137 The first line is used to check if the cache is still valid. If the
149 branch cache is for a filtered repo view, an optional third hash is
138 branch cache is for a filtered repo view, an optional third hash is
150 included that hashes the hashes of all filtered revisions.
139 included that hashes the hashes of all filtered revisions.
151
140
152 The open/closed state is represented by a single letter 'o' or 'c'.
141 The open/closed state is represented by a single letter 'o' or 'c'.
153 This field can be used to avoid changelog reads when determining if a
142 This field can be used to avoid changelog reads when determining if a
154 branch head closes a branch or not.
143 branch head closes a branch or not.
155 """
144 """
156
145
157 def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
146 def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
158 filteredhash=None, closednodes=None, hasnode=None):
147 filteredhash=None, closednodes=None, hasnode=None):
159 """ hasnode is a function which can be used to verify whether changelog
148 """ hasnode is a function which can be used to verify whether changelog
160 has a given node or not. If it's not provided, we assume that every node
149 has a given node or not. If it's not provided, we assume that every node
161 we have exists in changelog """
150 we have exists in changelog """
162 self.tipnode = tipnode
151 self.tipnode = tipnode
163 self.tiprev = tiprev
152 self.tiprev = tiprev
164 self.filteredhash = filteredhash
153 self.filteredhash = filteredhash
165 # closednodes is a set of nodes that close their branch. If the branch
154 # closednodes is a set of nodes that close their branch. If the branch
166 # cache has been updated, it may contain nodes that are no longer
155 # cache has been updated, it may contain nodes that are no longer
167 # heads.
156 # heads.
168 if closednodes is None:
157 if closednodes is None:
169 self._closednodes = set()
158 self._closednodes = set()
170 else:
159 else:
171 self._closednodes = closednodes
160 self._closednodes = closednodes
172 self._entries = dict(entries)
161 self._entries = dict(entries)
173 # whether closed nodes are verified or not
162 # whether closed nodes are verified or not
174 self._closedverified = False
163 self._closedverified = False
175 # branches for which nodes are verified
164 # branches for which nodes are verified
176 self._verifiedbranches = set()
165 self._verifiedbranches = set()
177 self._hasnode = hasnode
166 self._hasnode = hasnode
178 if self._hasnode is None:
167 if self._hasnode is None:
179 self._hasnode = lambda x: True
168 self._hasnode = lambda x: True
180
169
181 def _verifyclosed(self):
170 def _verifyclosed(self):
182 """ verify the closed nodes we have """
171 """ verify the closed nodes we have """
183 if self._closedverified:
172 if self._closedverified:
184 return
173 return
185 for node in self._closednodes:
174 for node in self._closednodes:
186 if not self._hasnode(node):
175 if not self._hasnode(node):
187 _unknownnode(node)
176 _unknownnode(node)
188
177
189 self._closedverified = True
178 self._closedverified = True
190
179
191 def _verifybranch(self, branch):
180 def _verifybranch(self, branch):
192 """ verify head nodes for the given branch. """
181 """ verify head nodes for the given branch. """
193 if branch not in self._entries or branch in self._verifiedbranches:
182 if branch not in self._entries or branch in self._verifiedbranches:
194 return
183 return
195 for n in self._entries[branch]:
184 for n in self._entries[branch]:
196 if not self._hasnode(n):
185 if not self._hasnode(n):
197 _unknownnode(n)
186 _unknownnode(n)
198
187
199 self._verifiedbranches.add(branch)
188 self._verifiedbranches.add(branch)
200
189
201 def _verifyall(self):
190 def _verifyall(self):
202 """ verifies nodes of all the branches """
191 """ verifies nodes of all the branches """
203 needverification = set(self._entries.keys()) - self._verifiedbranches
192 needverification = set(self._entries.keys()) - self._verifiedbranches
204 for b in needverification:
193 for b in needverification:
205 self._verifybranch(b)
194 self._verifybranch(b)
206
195
207 def __iter__(self):
196 def __iter__(self):
208 return iter(self._entries)
197 return iter(self._entries)
209
198
210 def __setitem__(self, key, value):
199 def __setitem__(self, key, value):
211 self._entries[key] = value
200 self._entries[key] = value
212
201
213 def __getitem__(self, key):
202 def __getitem__(self, key):
214 self._verifybranch(key)
203 self._verifybranch(key)
215 return self._entries[key]
204 return self._entries[key]
216
205
217 def __contains__(self, key):
206 def __contains__(self, key):
218 self._verifybranch(key)
207 self._verifybranch(key)
219 return key in self._entries
208 return key in self._entries
220
209
221 def iteritems(self):
210 def iteritems(self):
222 for k, v in self._entries.iteritems():
211 for k, v in self._entries.iteritems():
223 self._verifybranch(k)
212 self._verifybranch(k)
224 yield k, v
213 yield k, v
225
214
226 def hasbranch(self, label):
215 def hasbranch(self, label):
227 """ checks whether a branch of this name exists or not """
216 """ checks whether a branch of this name exists or not """
228 self._verifybranch(label)
217 self._verifybranch(label)
229 return label in self._entries
218 return label in self._entries
230
219
231 @classmethod
220 @classmethod
232 def fromfile(cls, repo):
221 def fromfile(cls, repo):
233 f = None
222 f = None
234 try:
223 try:
235 f = repo.cachevfs(cls._filename(repo))
224 f = repo.cachevfs(cls._filename(repo))
236 lineiter = iter(f)
225 lineiter = iter(f)
237 cachekey = next(lineiter).rstrip('\n').split(" ", 2)
226 cachekey = next(lineiter).rstrip('\n').split(" ", 2)
238 last, lrev = cachekey[:2]
227 last, lrev = cachekey[:2]
239 last, lrev = bin(last), int(lrev)
228 last, lrev = bin(last), int(lrev)
240 filteredhash = None
229 filteredhash = None
241 hasnode = repo.changelog.hasnode
230 hasnode = repo.changelog.hasnode
242 if len(cachekey) > 2:
231 if len(cachekey) > 2:
243 filteredhash = bin(cachekey[2])
232 filteredhash = bin(cachekey[2])
244 bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash,
233 bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash,
245 hasnode=hasnode)
234 hasnode=hasnode)
246 if not bcache.validfor(repo):
235 if not bcache.validfor(repo):
247 # invalidate the cache
236 # invalidate the cache
248 raise ValueError(r'tip differs')
237 raise ValueError(r'tip differs')
249 bcache.load(repo, lineiter)
238 bcache.load(repo, lineiter)
250 except (IOError, OSError):
239 except (IOError, OSError):
251 return None
240 return None
252
241
253 except Exception as inst:
242 except Exception as inst:
254 if repo.ui.debugflag:
243 if repo.ui.debugflag:
255 msg = 'invalid branchheads cache'
244 msg = 'invalid branchheads cache'
256 if repo.filtername is not None:
245 if repo.filtername is not None:
257 msg += ' (%s)' % repo.filtername
246 msg += ' (%s)' % repo.filtername
258 msg += ': %s\n'
247 msg += ': %s\n'
259 repo.ui.debug(msg % pycompat.bytestr(inst))
248 repo.ui.debug(msg % pycompat.bytestr(inst))
260 bcache = None
249 bcache = None
261
250
262 finally:
251 finally:
263 if f:
252 if f:
264 f.close()
253 f.close()
265
254
266 return bcache
255 return bcache
267
256
268 def load(self, repo, lineiter):
257 def load(self, repo, lineiter):
269 """ fully loads the branchcache by reading from the file using the line
258 """ fully loads the branchcache by reading from the file using the line
270 iterator passed"""
259 iterator passed"""
271 for line in lineiter:
260 for line in lineiter:
272 line = line.rstrip('\n')
261 line = line.rstrip('\n')
273 if not line:
262 if not line:
274 continue
263 continue
275 node, state, label = line.split(" ", 2)
264 node, state, label = line.split(" ", 2)
276 if state not in 'oc':
265 if state not in 'oc':
277 raise ValueError(r'invalid branch state')
266 raise ValueError(r'invalid branch state')
278 label = encoding.tolocal(label.strip())
267 label = encoding.tolocal(label.strip())
279 node = bin(node)
268 node = bin(node)
280 self._entries.setdefault(label, []).append(node)
269 self._entries.setdefault(label, []).append(node)
281 if state == 'c':
270 if state == 'c':
282 self._closednodes.add(node)
271 self._closednodes.add(node)
283
272
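As a reading aid for the on-disk layout documented in the class docstring
above, here is a standalone sketch that parses the same format from a plain
string. It mirrors what fromfile() and load() do, but works without
repo.cachevfs and skips the validfor() check; parse_branch2 and the sample
data are purely illustrative.

    def parse_branch2(text):
        lines = iter(text.splitlines())
        # First line: tip node, tip rev, optional filtered-revs hash.
        cachekey = next(lines).split(' ', 2)
        tipnode, tiprev = cachekey[0], int(cachekey[1])
        filteredhash = cachekey[2] if len(cachekey) > 2 else None
        heads, closed = {}, set()
        for line in lines:
            if not line:
                continue
            node, state, label = line.split(' ', 2)
            if state not in 'oc':
                raise ValueError('invalid branch state')
            heads.setdefault(label, []).append(node)
            if state == 'c':
                closed.add(node)
        return tipnode, tiprev, filteredhash, heads, closed

    sample = ('1111111111111111111111111111111111111111 42\n'
              '2222222222222222222222222222222222222222 o default\n'
              '3333333333333333333333333333333333333333 c stable\n')
    print(parse_branch2(sample))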
284 @staticmethod
273 @staticmethod
285 def _filename(repo):
274 def _filename(repo):
286 """name of a branchcache file for a given repo or repoview"""
275 """name of a branchcache file for a given repo or repoview"""
287 filename = "branch2"
276 filename = "branch2"
288 if repo.filtername:
277 if repo.filtername:
289 filename = '%s-%s' % (filename, repo.filtername)
278 filename = '%s-%s' % (filename, repo.filtername)
290 return filename
279 return filename
291
280
292 def validfor(self, repo):
281 def validfor(self, repo):
293 """Is the cache content valid regarding a repo
282 """Is the cache content valid regarding a repo
294
283
295 - False when cached tipnode is unknown or if we detect a strip.
284 - False when cached tipnode is unknown or if we detect a strip.
296 - True when cache is up to date or a subset of current repo."""
285 - True when cache is up to date or a subset of current repo."""
297 try:
286 try:
298 return ((self.tipnode == repo.changelog.node(self.tiprev))
287 return ((self.tipnode == repo.changelog.node(self.tiprev))
299 and (self.filteredhash ==
288 and (self.filteredhash ==
300 scmutil.filteredhash(repo, self.tiprev)))
289 scmutil.filteredhash(repo, self.tiprev)))
301 except IndexError:
290 except IndexError:
302 return False
291 return False
303
292
304 def _branchtip(self, heads):
293 def _branchtip(self, heads):
305 '''Return tuple with last open head in heads and false,
294 '''Return tuple with last open head in heads and false,
306 otherwise return last closed head and true.'''
295 otherwise return last closed head and true.'''
307 tip = heads[-1]
296 tip = heads[-1]
308 closed = True
297 closed = True
309 for h in reversed(heads):
298 for h in reversed(heads):
310 if h not in self._closednodes:
299 if h not in self._closednodes:
311 tip = h
300 tip = h
312 closed = False
301 closed = False
313 break
302 break
314 return tip, closed
303 return tip, closed
315
304
316 def branchtip(self, branch):
305 def branchtip(self, branch):
317 '''Return the tipmost open head on the branch, otherwise return the
306 '''Return the tipmost open head on the branch, otherwise return the
318 tipmost closed head on the branch.
307 tipmost closed head on the branch.
319 Raise KeyError for unknown branch.'''
308 Raise KeyError for unknown branch.'''
320 return self._branchtip(self[branch])[0]
309 return self._branchtip(self[branch])[0]
321
310
322 def iteropen(self, nodes):
311 def iteropen(self, nodes):
323 return (n for n in nodes if n not in self._closednodes)
312 return (n for n in nodes if n not in self._closednodes)
324
313
325 def branchheads(self, branch, closed=False):
314 def branchheads(self, branch, closed=False):
326 self._verifybranch(branch)
315 self._verifybranch(branch)
327 heads = self._entries[branch]
316 heads = self._entries[branch]
328 if not closed:
317 if not closed:
329 heads = list(self.iteropen(heads))
318 heads = list(self.iteropen(heads))
330 return heads
319 return heads
331
320
332 def iterbranches(self):
321 def iterbranches(self):
333 for bn, heads in self.iteritems():
322 for bn, heads in self.iteritems():
334 yield (bn, heads) + self._branchtip(heads)
323 yield (bn, heads) + self._branchtip(heads)
335
324
336 def iterheads(self):
325 def iterheads(self):
337 """ returns all the heads """
326 """ returns all the heads """
338 self._verifyall()
327 self._verifyall()
339 return self._entries.itervalues()
328 return self._entries.itervalues()
340
329
341 def copy(self):
330 def copy(self):
342 """return an deep copy of the branchcache object"""
331 """return an deep copy of the branchcache object"""
343 return type(self)(
332 return type(self)(
344 self._entries, self.tipnode, self.tiprev, self.filteredhash,
333 self._entries, self.tipnode, self.tiprev, self.filteredhash,
345 self._closednodes)
334 self._closednodes)
346
335
347 def write(self, repo):
336 def write(self, repo):
348 try:
337 try:
349 f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
338 f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
350 cachekey = [hex(self.tipnode), '%d' % self.tiprev]
339 cachekey = [hex(self.tipnode), '%d' % self.tiprev]
351 if self.filteredhash is not None:
340 if self.filteredhash is not None:
352 cachekey.append(hex(self.filteredhash))
341 cachekey.append(hex(self.filteredhash))
353 f.write(" ".join(cachekey) + '\n')
342 f.write(" ".join(cachekey) + '\n')
354 nodecount = 0
343 nodecount = 0
355 for label, nodes in sorted(self.iteritems()):
344 for label, nodes in sorted(self.iteritems()):
356 label = encoding.fromlocal(label)
345 label = encoding.fromlocal(label)
357 for node in nodes:
346 for node in nodes:
358 nodecount += 1
347 nodecount += 1
359 if node in self._closednodes:
348 if node in self._closednodes:
360 state = 'c'
349 state = 'c'
361 else:
350 else:
362 state = 'o'
351 state = 'o'
363 f.write("%s %s %s\n" % (hex(node), state, label))
352 f.write("%s %s %s\n" % (hex(node), state, label))
364 f.close()
353 f.close()
365 repo.ui.log('branchcache',
354 repo.ui.log('branchcache',
366 'wrote %s branch cache with %d labels and %d nodes\n',
355 'wrote %s branch cache with %d labels and %d nodes\n',
367 repo.filtername, len(self._entries), nodecount)
356 repo.filtername, len(self._entries), nodecount)
368 except (IOError, OSError, error.Abort) as inst:
357 except (IOError, OSError, error.Abort) as inst:
369 # Abort may be raised by read only opener, so log and continue
358 # Abort may be raised by read only opener, so log and continue
370 repo.ui.debug("couldn't write branch cache: %s\n" %
359 repo.ui.debug("couldn't write branch cache: %s\n" %
371 stringutil.forcebytestr(inst))
360 stringutil.forcebytestr(inst))
372
361
373 def update(self, repo, revgen):
362 def update(self, repo, revgen):
374 """Given a branchhead cache, self, that may have extra nodes or be
363 """Given a branchhead cache, self, that may have extra nodes or be
375 missing heads, and a generator of nodes that are strictly a superset of
364 missing heads, and a generator of nodes that are strictly a superset of
376 the missing heads, this function updates self to be correct.
365 the missing heads, this function updates self to be correct.
377 """
366 """
378 starttime = util.timer()
367 starttime = util.timer()
379 cl = repo.changelog
368 cl = repo.changelog
380 # collect new branch entries
369 # collect new branch entries
381 newbranches = {}
370 newbranches = {}
382 getbranchinfo = repo.revbranchcache().branchinfo
371 getbranchinfo = repo.revbranchcache().branchinfo
383 for r in revgen:
372 for r in revgen:
384 branch, closesbranch = getbranchinfo(r)
373 branch, closesbranch = getbranchinfo(r)
385 newbranches.setdefault(branch, []).append(r)
374 newbranches.setdefault(branch, []).append(r)
386 if closesbranch:
375 if closesbranch:
387 self._closednodes.add(cl.node(r))
376 self._closednodes.add(cl.node(r))
388
377
389 # fetch current topological heads to speed up filtering
378 # fetch current topological heads to speed up filtering
390 topoheads = set(cl.headrevs())
379 topoheads = set(cl.headrevs())
391
380
392 # if older branchheads are reachable from new ones, they aren't
381 # if older branchheads are reachable from new ones, they aren't
393 # really branchheads. Note checking parents is insufficient:
382 # really branchheads. Note checking parents is insufficient:
394 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
383 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
395 for branch, newheadrevs in newbranches.iteritems():
384 for branch, newheadrevs in newbranches.iteritems():
396 bheads = self._entries.setdefault(branch, [])
385 bheads = self._entries.setdefault(branch, [])
397 bheadset = set(cl.rev(node) for node in bheads)
386 bheadset = set(cl.rev(node) for node in bheads)
398
387
399 # This has been tested True for all internal usage of this function.
388 # This has been tested True for all internal usage of this function.
400 # Run it again in case of doubt:
389 # Run it again in case of doubt:
401 # assert not (set(bheadrevs) & set(newheadrevs))
390 # assert not (set(bheadrevs) & set(newheadrevs))
402 bheadset.update(newheadrevs)
391 bheadset.update(newheadrevs)
403
392
404 # This prunes out two kinds of heads - heads that are superseded by
393 # This prunes out two kinds of heads - heads that are superseded by
405 # a head in newheadrevs, and newheadrevs that are not heads because
394 # a head in newheadrevs, and newheadrevs that are not heads because
406 # an existing head is their descendant.
395 # an existing head is their descendant.
407 uncertain = bheadset - topoheads
396 uncertain = bheadset - topoheads
408 if uncertain:
397 if uncertain:
409 floorrev = min(uncertain)
398 floorrev = min(uncertain)
410 ancestors = set(cl.ancestors(newheadrevs, floorrev))
399 ancestors = set(cl.ancestors(newheadrevs, floorrev))
411 bheadset -= ancestors
400 bheadset -= ancestors
412 bheadrevs = sorted(bheadset)
401 bheadrevs = sorted(bheadset)
413 self[branch] = [cl.node(rev) for rev in bheadrevs]
402 self[branch] = [cl.node(rev) for rev in bheadrevs]
414 tiprev = bheadrevs[-1]
403 tiprev = bheadrevs[-1]
415 if tiprev > self.tiprev:
404 if tiprev > self.tiprev:
416 self.tipnode = cl.node(tiprev)
405 self.tipnode = cl.node(tiprev)
417 self.tiprev = tiprev
406 self.tiprev = tiprev
418
407
419 if not self.validfor(repo):
408 if not self.validfor(repo):
420 # the cache key is not valid anymore
409 # the cache key is not valid anymore
421 self.tipnode = nullid
410 self.tipnode = nullid
422 self.tiprev = nullrev
411 self.tiprev = nullrev
423 for heads in self.iterheads():
412 for heads in self.iterheads():
424 tiprev = max(cl.rev(node) for node in heads)
413 tiprev = max(cl.rev(node) for node in heads)
425 if tiprev > self.tiprev:
414 if tiprev > self.tiprev:
426 self.tipnode = cl.node(tiprev)
415 self.tipnode = cl.node(tiprev)
427 self.tiprev = tiprev
416 self.tiprev = tiprev
428 self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
417 self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
429
418
430 duration = util.timer() - starttime
419 duration = util.timer() - starttime
431 repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
420 repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
432 repo.filtername or b'None', duration)
421 repo.filtername or b'None', duration)
433
422
434 self.write(repo)
423 self.write(repo)
435
424
436
425
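The pruning step inside update() above is easy to miss: candidate heads that
are not topological heads may have been superseded by the new heads. Here is a
toy rendition with a dict-based DAG, where ancestors() is a naive stand-in for
cl.ancestors(revs, floor) (proper ancestors at or above the floor revision);
the DAG and names are assumptions for illustration only.

    def ancestors(parents, revs, floor):
        # Proper ancestors of `revs` with revision number >= floor.
        seen, stack = set(), list(revs)
        while stack:
            for p in parents[stack.pop()]:
                if p >= floor and p not in seen:
                    seen.add(p)
                    stack.append(p)
        return seen

    # DAG: 0 <- 1 <- 2 and 0 <- 3; old head 1, new head 2, topo heads {2, 3}.
    parents = {0: [], 1: [0], 2: [1], 3: [0]}
    bheadset, topoheads, newheadrevs = {1, 2}, {2, 3}, [2]
    uncertain = bheadset - topoheads          # {1}: maybe not a head anymore
    if uncertain:
        floor = min(uncertain)
        bheadset -= ancestors(parents, newheadrevs, floor)
    print(sorted(bheadset))                   # [2]: rev 1 superseded by rev 2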
437 class remotebranchcache(branchcache):
426 class remotebranchcache(branchcache):
438 """Branchmap info for a remote connection, should not write locally"""
427 """Branchmap info for a remote connection, should not write locally"""
439 def write(self, repo):
428 def write(self, repo):
440 pass
429 pass
441
430
442
431
443 # Revision branch info cache
432 # Revision branch info cache
444
433
445 _rbcversion = '-v1'
434 _rbcversion = '-v1'
446 _rbcnames = 'rbc-names' + _rbcversion
435 _rbcnames = 'rbc-names' + _rbcversion
447 _rbcrevs = 'rbc-revs' + _rbcversion
436 _rbcrevs = 'rbc-revs' + _rbcversion
448 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
437 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
449 _rbcrecfmt = '>4sI'
438 _rbcrecfmt = '>4sI'
450 _rbcrecsize = calcsize(_rbcrecfmt)
439 _rbcrecsize = calcsize(_rbcrecfmt)
451 _rbcnodelen = 4
440 _rbcnodelen = 4
452 _rbcbranchidxmask = 0x7fffffff
441 _rbcbranchidxmask = 0x7fffffff
453 _rbccloseflag = 0x80000000
442 _rbccloseflag = 0x80000000
454
443
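A small sketch of what one rbc-revs record looks like under the constants just
defined: 4 bytes of the node hash plus a big-endian 32-bit field whose low 31
bits index into rbc-names and whose high bit flags a branch-closing commit.
The helper names and toy node value are assumptions for illustration.

    import struct

    _rbcrecfmt = '>4sI'
    _rbccloseflag = 0x80000000
    _rbcbranchidxmask = 0x7fffffff

    def pack_record(node, branchidx, close):
        # Store only the first 4 bytes of the node hash.
        if close:
            branchidx |= _rbccloseflag
        return struct.pack(_rbcrecfmt, node[:4], branchidx)

    def unpack_record(record):
        prefix, field = struct.unpack(_rbcrecfmt, record)
        return prefix, field & _rbcbranchidxmask, bool(field & _rbccloseflag)

    rec = pack_record(b'\xde\xad\xbe\xef-rest-of-hash', 7, close=True)
    print(unpack_record(rec))  # -> (b'\xde\xad\xbe\xef', 7, True)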
455 class revbranchcache(object):
444 class revbranchcache(object):
456 """Persistent cache, mapping from revision number to branch name and close.
445 """Persistent cache, mapping from revision number to branch name and close.
457 This is a low level cache, independent of filtering.
446 This is a low level cache, independent of filtering.
458
447
459 Branch names are stored in rbc-names in internal encoding separated by 0.
448 Branch names are stored in rbc-names in internal encoding separated by 0.
460 rbc-names is append-only, and each branch name is only stored once and will
449 rbc-names is append-only, and each branch name is only stored once and will
461 thus have a unique index.
450 thus have a unique index.
462
451
463 The branch info for each revision is stored in rbc-revs as constant size
452 The branch info for each revision is stored in rbc-revs as constant size
464 records. The whole file is read into memory, but it is only 'parsed' on
453 records. The whole file is read into memory, but it is only 'parsed' on
465 demand. The file is usually append-only but will be truncated if repo
454 demand. The file is usually append-only but will be truncated if repo
466 modification is detected.
455 modification is detected.
467 The record for each revision contains the first 4 bytes of the
456 The record for each revision contains the first 4 bytes of the
468 corresponding node hash, and the record is only used if it still matches.
457 corresponding node hash, and the record is only used if it still matches.
469 Even a completely trashed rbc-revs will thus still give the right result
458 Even a completely trashed rbc-revs will thus still give the right result
470 while converging towards full recovery ... assuming no incorrectly matching
459 while converging towards full recovery ... assuming no incorrectly matching
471 node hashes.
460 node hashes.
472 The record also contains 4 bytes where 31 bits contain the index of the
461 The record also contains 4 bytes where 31 bits contain the index of the
473 branch and the last bit indicates that it is a branch-closing commit.
462 branch and the last bit indicates that it is a branch-closing commit.
474 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
463 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
475 and will grow with it but be 1/8th of its size.
464 and will grow with it but be 1/8th of its size.
476 """
465 """
477
466
478 def __init__(self, repo, readonly=True):
467 def __init__(self, repo, readonly=True):
479 assert repo.filtername is None
468 assert repo.filtername is None
480 self._repo = repo
469 self._repo = repo
481 self._names = [] # branch names in local encoding with static index
470 self._names = [] # branch names in local encoding with static index
482 self._rbcrevs = bytearray()
471 self._rbcrevs = bytearray()
483 self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
472 self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
484 try:
473 try:
485 bndata = repo.cachevfs.read(_rbcnames)
474 bndata = repo.cachevfs.read(_rbcnames)
486 self._rbcsnameslen = len(bndata) # for verification before writing
475 self._rbcsnameslen = len(bndata) # for verification before writing
487 if bndata:
476 if bndata:
488 self._names = [encoding.tolocal(bn)
477 self._names = [encoding.tolocal(bn)
489 for bn in bndata.split('\0')]
478 for bn in bndata.split('\0')]
490 except (IOError, OSError):
479 except (IOError, OSError):
491 if readonly:
480 if readonly:
492 # don't try to use cache - fall back to the slow path
481 # don't try to use cache - fall back to the slow path
493 self.branchinfo = self._branchinfo
482 self.branchinfo = self._branchinfo
494
483
495 if self._names:
484 if self._names:
496 try:
485 try:
497 data = repo.cachevfs.read(_rbcrevs)
486 data = repo.cachevfs.read(_rbcrevs)
498 self._rbcrevs[:] = data
487 self._rbcrevs[:] = data
499 except (IOError, OSError) as inst:
488 except (IOError, OSError) as inst:
500 repo.ui.debug("couldn't read revision branch cache: %s\n" %
489 repo.ui.debug("couldn't read revision branch cache: %s\n" %
501 stringutil.forcebytestr(inst))
490 stringutil.forcebytestr(inst))
502 # remember number of good records on disk
491 # remember number of good records on disk
503 self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
492 self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
504 len(repo.changelog))
493 len(repo.changelog))
505 if self._rbcrevslen == 0:
494 if self._rbcrevslen == 0:
506 self._names = []
495 self._names = []
507 self._rbcnamescount = len(self._names) # number of names read at
496 self._rbcnamescount = len(self._names) # number of names read at
508 # _rbcsnameslen
497 # _rbcsnameslen
509
498
510 def _clear(self):
499 def _clear(self):
511 self._rbcsnameslen = 0
500 self._rbcsnameslen = 0
512 del self._names[:]
501 del self._names[:]
513 self._rbcnamescount = 0
502 self._rbcnamescount = 0
514 self._rbcrevslen = len(self._repo.changelog)
503 self._rbcrevslen = len(self._repo.changelog)
515 self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
504 self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
516 util.clearcachedproperty(self, '_namesreverse')
505 util.clearcachedproperty(self, '_namesreverse')
517
506
518 @util.propertycache
507 @util.propertycache
519 def _namesreverse(self):
508 def _namesreverse(self):
520 return dict((b, r) for r, b in enumerate(self._names))
509 return dict((b, r) for r, b in enumerate(self._names))
521
510
522 def branchinfo(self, rev):
511 def branchinfo(self, rev):
523 """Return branch name and close flag for rev, using and updating
512 """Return branch name and close flag for rev, using and updating
524 persistent cache."""
513 persistent cache."""
525 changelog = self._repo.changelog
514 changelog = self._repo.changelog
526 rbcrevidx = rev * _rbcrecsize
515 rbcrevidx = rev * _rbcrecsize
527
516
528 # avoid negative index, changelog.read(nullrev) is fast without cache
517 # avoid negative index, changelog.read(nullrev) is fast without cache
529 if rev == nullrev:
518 if rev == nullrev:
530 return changelog.branchinfo(rev)
519 return changelog.branchinfo(rev)
531
520
532 # if requested rev isn't allocated, grow and cache the rev info
521 # if requested rev isn't allocated, grow and cache the rev info
533 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
522 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
534 return self._branchinfo(rev)
523 return self._branchinfo(rev)
535
524
536 # fast path: extract data from cache, use it if node is matching
525 # fast path: extract data from cache, use it if node is matching
537 reponode = changelog.node(rev)[:_rbcnodelen]
526 reponode = changelog.node(rev)[:_rbcnodelen]
538 cachenode, branchidx = unpack_from(
527 cachenode, branchidx = unpack_from(
539 _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
528 _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
540 close = bool(branchidx & _rbccloseflag)
529 close = bool(branchidx & _rbccloseflag)
541 if close:
530 if close:
542 branchidx &= _rbcbranchidxmask
531 branchidx &= _rbcbranchidxmask
543 if cachenode == '\0\0\0\0':
532 if cachenode == '\0\0\0\0':
544 pass
533 pass
545 elif cachenode == reponode:
534 elif cachenode == reponode:
546 try:
535 try:
547 return self._names[branchidx], close
536 return self._names[branchidx], close
548 except IndexError:
537 except IndexError:
549 # recover from invalid reference to unknown branch
538 # recover from invalid reference to unknown branch
550 self._repo.ui.debug("referenced branch names not found"
539 self._repo.ui.debug("referenced branch names not found"
551 " - rebuilding revision branch cache from scratch\n")
540 " - rebuilding revision branch cache from scratch\n")
552 self._clear()
541 self._clear()
553 else:
542 else:
554 # rev/node map has changed, invalidate the cache from here up
543 # rev/node map has changed, invalidate the cache from here up
555 self._repo.ui.debug("history modification detected - truncating "
544 self._repo.ui.debug("history modification detected - truncating "
556 "revision branch cache to revision %d\n" % rev)
545 "revision branch cache to revision %d\n" % rev)
557 truncate = rbcrevidx + _rbcrecsize
546 truncate = rbcrevidx + _rbcrecsize
558 del self._rbcrevs[truncate:]
547 del self._rbcrevs[truncate:]
559 self._rbcrevslen = min(self._rbcrevslen, truncate)
548 self._rbcrevslen = min(self._rbcrevslen, truncate)
560
549
561 # fall back to slow path and make sure it will be written to disk
550 # fall back to slow path and make sure it will be written to disk
562 return self._branchinfo(rev)
551 return self._branchinfo(rev)
563
552
564 def _branchinfo(self, rev):
553 def _branchinfo(self, rev):
565 """Retrieve branch info from changelog and update _rbcrevs"""
554 """Retrieve branch info from changelog and update _rbcrevs"""
566 changelog = self._repo.changelog
555 changelog = self._repo.changelog
567 b, close = changelog.branchinfo(rev)
556 b, close = changelog.branchinfo(rev)
568 if b in self._namesreverse:
557 if b in self._namesreverse:
569 branchidx = self._namesreverse[b]
558 branchidx = self._namesreverse[b]
570 else:
559 else:
571 branchidx = len(self._names)
560 branchidx = len(self._names)
572 self._names.append(b)
561 self._names.append(b)
573 self._namesreverse[b] = branchidx
562 self._namesreverse[b] = branchidx
574 reponode = changelog.node(rev)
563 reponode = changelog.node(rev)
575 if close:
564 if close:
576 branchidx |= _rbccloseflag
565 branchidx |= _rbccloseflag
577 self._setcachedata(rev, reponode, branchidx)
566 self._setcachedata(rev, reponode, branchidx)
578 return b, close
567 return b, close
579
568
580 def setdata(self, branch, rev, node, close):
569 def setdata(self, branch, rev, node, close):
581 """add new data information to the cache"""
570 """add new data information to the cache"""
582 if branch in self._namesreverse:
571 if branch in self._namesreverse:
583 branchidx = self._namesreverse[branch]
572 branchidx = self._namesreverse[branch]
584 else:
573 else:
585 branchidx = len(self._names)
574 branchidx = len(self._names)
586 self._names.append(branch)
575 self._names.append(branch)
587 self._namesreverse[branch] = branchidx
576 self._namesreverse[branch] = branchidx
588 if close:
577 if close:
589 branchidx |= _rbccloseflag
578 branchidx |= _rbccloseflag
590 self._setcachedata(rev, node, branchidx)
579 self._setcachedata(rev, node, branchidx)
591 # If no cache data was readable (none exists, bad permissions, etc),
580 # If no cache data was readable (none exists, bad permissions, etc),
592 # the cache was bypassing itself by setting:
581 # the cache was bypassing itself by setting:
593 #
582 #
594 # self.branchinfo = self._branchinfo
583 # self.branchinfo = self._branchinfo
595 #
584 #
596 # Since we now have data in the cache, we need to drop this bypassing.
585 # Since we now have data in the cache, we need to drop this bypassing.
597 if r'branchinfo' in vars(self):
586 if r'branchinfo' in vars(self):
598 del self.branchinfo
587 del self.branchinfo
599
588
600 def _setcachedata(self, rev, node, branchidx):
589 def _setcachedata(self, rev, node, branchidx):
601 """Writes the node's branch data to the in-memory cache data."""
590 """Writes the node's branch data to the in-memory cache data."""
602 if rev == nullrev:
591 if rev == nullrev:
603 return
592 return
604 rbcrevidx = rev * _rbcrecsize
593 rbcrevidx = rev * _rbcrecsize
605 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
594 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
606 self._rbcrevs.extend('\0' *
595 self._rbcrevs.extend('\0' *
607 (len(self._repo.changelog) * _rbcrecsize -
596 (len(self._repo.changelog) * _rbcrecsize -
608 len(self._rbcrevs)))
597 len(self._rbcrevs)))
609 pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
598 pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
610 self._rbcrevslen = min(self._rbcrevslen, rev)
599 self._rbcrevslen = min(self._rbcrevslen, rev)
611
600
612 tr = self._repo.currenttransaction()
601 tr = self._repo.currenttransaction()
613 if tr:
602 if tr:
614 tr.addfinalize('write-revbranchcache', self.write)
603 tr.addfinalize('write-revbranchcache', self.write)
615
604
616 def write(self, tr=None):
605 def write(self, tr=None):
617 """Save branch cache if it is dirty."""
606 """Save branch cache if it is dirty."""
618 repo = self._repo
607 repo = self._repo
619 wlock = None
608 wlock = None
620 step = ''
609 step = ''
621 try:
610 try:
622 if self._rbcnamescount < len(self._names):
611 if self._rbcnamescount < len(self._names):
623 step = ' names'
612 step = ' names'
624 wlock = repo.wlock(wait=False)
613 wlock = repo.wlock(wait=False)
625 if self._rbcnamescount != 0:
614 if self._rbcnamescount != 0:
626 f = repo.cachevfs.open(_rbcnames, 'ab')
615 f = repo.cachevfs.open(_rbcnames, 'ab')
627 if f.tell() == self._rbcsnameslen:
616 if f.tell() == self._rbcsnameslen:
628 f.write('\0')
617 f.write('\0')
629 else:
618 else:
630 f.close()
619 f.close()
631 repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
620 repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
632 self._rbcnamescount = 0
621 self._rbcnamescount = 0
633 self._rbcrevslen = 0
622 self._rbcrevslen = 0
634 if self._rbcnamescount == 0:
623 if self._rbcnamescount == 0:
635 # before rewriting names, make sure references are removed
624 # before rewriting names, make sure references are removed
636 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
625 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
637 f = repo.cachevfs.open(_rbcnames, 'wb')
626 f = repo.cachevfs.open(_rbcnames, 'wb')
638 f.write('\0'.join(encoding.fromlocal(b)
627 f.write('\0'.join(encoding.fromlocal(b)
639 for b in self._names[self._rbcnamescount:]))
628 for b in self._names[self._rbcnamescount:]))
640 self._rbcsnameslen = f.tell()
629 self._rbcsnameslen = f.tell()
641 f.close()
630 f.close()
642 self._rbcnamescount = len(self._names)
631 self._rbcnamescount = len(self._names)
643
632
644 start = self._rbcrevslen * _rbcrecsize
633 start = self._rbcrevslen * _rbcrecsize
645 if start != len(self._rbcrevs):
634 if start != len(self._rbcrevs):
646 step = ''
635 step = ''
647 if wlock is None:
636 if wlock is None:
648 wlock = repo.wlock(wait=False)
637 wlock = repo.wlock(wait=False)
649 revs = min(len(repo.changelog),
638 revs = min(len(repo.changelog),
650 len(self._rbcrevs) // _rbcrecsize)
639 len(self._rbcrevs) // _rbcrecsize)
651 f = repo.cachevfs.open(_rbcrevs, 'ab')
640 f = repo.cachevfs.open(_rbcrevs, 'ab')
652 if f.tell() != start:
641 if f.tell() != start:
653 repo.ui.debug("truncating cache/%s to %d\n"
642 repo.ui.debug("truncating cache/%s to %d\n"
654 % (_rbcrevs, start))
643 % (_rbcrevs, start))
655 f.seek(start)
644 f.seek(start)
656 if f.tell() != start:
645 if f.tell() != start:
657 start = 0
646 start = 0
658 f.seek(start)
647 f.seek(start)
659 f.truncate()
648 f.truncate()
660 end = revs * _rbcrecsize
649 end = revs * _rbcrecsize
661 f.write(self._rbcrevs[start:end])
650 f.write(self._rbcrevs[start:end])
662 f.close()
651 f.close()
663 self._rbcrevslen = revs
652 self._rbcrevslen = revs
664 except (IOError, OSError, error.Abort, error.LockError) as inst:
653 except (IOError, OSError, error.Abort, error.LockError) as inst:
665 repo.ui.debug("couldn't write revision branch cache%s: %s\n"
654 repo.ui.debug("couldn't write revision branch cache%s: %s\n"
666 % (step, stringutil.forcebytestr(inst)))
655 % (step, stringutil.forcebytestr(inst)))
667 finally:
656 finally:
668 if wlock is not None:
657 if wlock is not None:
669 wlock.release()
658 wlock.release()
@@ -1,280 +1,280 b''
1 # repoview.py - Filtered view of a localrepo object
1 # repoview.py - Filtered view of a localrepo object
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import copy
11 import copy
12 import weakref
12 import weakref
13
13
14 from .node import nullrev
14 from .node import nullrev
15 from . import (
15 from . import (
16 obsolete,
16 obsolete,
17 phases,
17 phases,
18 pycompat,
18 pycompat,
19 tags as tagsmod,
19 tags as tagsmod,
20 )
20 )
21
21
22 def hideablerevs(repo):
22 def hideablerevs(repo):
23 """Revision candidates to be hidden
23 """Revision candidates to be hidden
24
24
25 This is a standalone function to allow extensions to wrap it.
25 This is a standalone function to allow extensions to wrap it.
26
26
27 Because we use the set of immutable changesets as a fallback subset in
27 Because we use the set of immutable changesets as a fallback subset in
28 branchmap (see mercurial.branchmap.subsettable), you cannot set "public"
28 branchmap (see mercurial.utils.repoviewutil.subsettable), you cannot set
29 changesets as "hideable". Doing so would break multiple code assertions and
29 "public" changesets as "hideable". Doing so would break multiple code
30 lead to crashes."""
30 assertions and lead to crashes."""
31 obsoletes = obsolete.getrevs(repo, 'obsolete')
31 obsoletes = obsolete.getrevs(repo, 'obsolete')
32 internals = repo._phasecache.getrevset(repo, phases.localhiddenphases)
32 internals = repo._phasecache.getrevset(repo, phases.localhiddenphases)
33 internals = frozenset(internals)
33 internals = frozenset(internals)
34 return obsoletes | internals
34 return obsoletes | internals
35
35
36 def pinnedrevs(repo):
36 def pinnedrevs(repo):
37 """revisions blocking hidden changesets from being filtered
37 """revisions blocking hidden changesets from being filtered
38 """
38 """
39
39
40 cl = repo.changelog
40 cl = repo.changelog
41 pinned = set()
41 pinned = set()
42 pinned.update([par.rev() for par in repo[None].parents()])
42 pinned.update([par.rev() for par in repo[None].parents()])
43 pinned.update([cl.rev(bm) for bm in repo._bookmarks.values()])
43 pinned.update([cl.rev(bm) for bm in repo._bookmarks.values()])
44
44
45 tags = {}
45 tags = {}
46 tagsmod.readlocaltags(repo.ui, repo, tags, {})
46 tagsmod.readlocaltags(repo.ui, repo, tags, {})
47 if tags:
47 if tags:
48 rev, nodemap = cl.rev, cl.nodemap
48 rev, nodemap = cl.rev, cl.nodemap
49 pinned.update(rev(t[0]) for t in tags.values() if t[0] in nodemap)
49 pinned.update(rev(t[0]) for t in tags.values() if t[0] in nodemap)
50 return pinned
50 return pinned
51
51
52
52
53 def _revealancestors(pfunc, hidden, revs):
53 def _revealancestors(pfunc, hidden, revs):
54 """reveals contiguous chains of hidden ancestors of 'revs' by removing them
54 """reveals contiguous chains of hidden ancestors of 'revs' by removing them
55 from 'hidden'
55 from 'hidden'
56
56
57 - pfunc(r): a function returning the parents of 'r',
57 - pfunc(r): a function returning the parents of 'r',
58 - hidden: the (preliminary) hidden revisions, to be updated
58 - hidden: the (preliminary) hidden revisions, to be updated
59 - revs: iterable of revnum,
59 - revs: iterable of revnum,
60
60
61 (Ancestors are revealed exclusively, i.e. the elements in 'revs' are
61 (Ancestors are revealed exclusively, i.e. the elements in 'revs' are
62 *not* revealed)
62 *not* revealed)
63 """
63 """
64 stack = list(revs)
64 stack = list(revs)
65 while stack:
65 while stack:
66 for p in pfunc(stack.pop()):
66 for p in pfunc(stack.pop()):
67 if p != nullrev and p in hidden:
67 if p != nullrev and p in hidden:
68 hidden.remove(p)
68 hidden.remove(p)
69 stack.append(p)
69 stack.append(p)
70
70
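To make the traversal above concrete, here is a minimal, self-contained sketch with toy data (not Mercurial API): a plain dict stands in for repo.changelog.parentrevs, and revealing from rev 4 removes its hidden ancestors from the preliminary set while rev 4 itself is never touched.

    # toy parent map: rev -> (p1, p2); -1 plays the role of nullrev
    parentmap = {4: (2, -1), 3: (1, -1), 2: (1, -1), 1: (0, -1), 0: (-1, -1)}
    hidden = {0, 1, 2, 3}                      # preliminary hidden set
    _revealancestors(parentmap.__getitem__, hidden, [4])
    print(sorted(hidden))                      # [3]: revs 2, 1 and 0 revealed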
71 def computehidden(repo, visibilityexceptions=None):
71 def computehidden(repo, visibilityexceptions=None):
72 """compute the set of hidden revision to filter
72 """compute the set of hidden revision to filter
73
73
74 During most operation hidden should be filtered."""
74 During most operation hidden should be filtered."""
75 assert not repo.changelog.filteredrevs
75 assert not repo.changelog.filteredrevs
76
76
77 hidden = hideablerevs(repo)
77 hidden = hideablerevs(repo)
78 if hidden:
78 if hidden:
79 hidden = set(hidden - pinnedrevs(repo))
79 hidden = set(hidden - pinnedrevs(repo))
80 if visibilityexceptions:
80 if visibilityexceptions:
81 hidden -= visibilityexceptions
81 hidden -= visibilityexceptions
82 pfunc = repo.changelog.parentrevs
82 pfunc = repo.changelog.parentrevs
83 mutable = repo._phasecache.getrevset(repo, phases.mutablephases)
83 mutable = repo._phasecache.getrevset(repo, phases.mutablephases)
84
84
85 visible = mutable - hidden
85 visible = mutable - hidden
86 _revealancestors(pfunc, hidden, visible)
86 _revealancestors(pfunc, hidden, visible)
87 return frozenset(hidden)
87 return frozenset(hidden)
88
88
89 def computesecret(repo, visibilityexceptions=None):
89 def computesecret(repo, visibilityexceptions=None):
90 """compute the set of revision that can never be exposed through hgweb
90 """compute the set of revision that can never be exposed through hgweb
91
91
92 Changeset in the secret phase (or above) should stay unaccessible."""
92 Changeset in the secret phase (or above) should stay unaccessible."""
93 assert not repo.changelog.filteredrevs
93 assert not repo.changelog.filteredrevs
94 secrets = repo._phasecache.getrevset(repo, phases.remotehiddenphases)
94 secrets = repo._phasecache.getrevset(repo, phases.remotehiddenphases)
95 return frozenset(secrets)
95 return frozenset(secrets)
96
96
97 def computeunserved(repo, visibilityexceptions=None):
97 def computeunserved(repo, visibilityexceptions=None):
98 """compute the set of revision that should be filtered when used a server
98 """compute the set of revision that should be filtered when used a server
99
99
100 Secret and hidden changeset should not pretend to be here."""
100 Secret and hidden changeset should not pretend to be here."""
101 assert not repo.changelog.filteredrevs
101 assert not repo.changelog.filteredrevs
102 # fast path in the simple case to avoid the impact of non-optimised code
102 # fast path in the simple case to avoid the impact of non-optimised code
103 hiddens = filterrevs(repo, 'visible')
103 hiddens = filterrevs(repo, 'visible')
104 secrets = filterrevs(repo, 'served.hidden')
104 secrets = filterrevs(repo, 'served.hidden')
105 if secrets:
105 if secrets:
106 return frozenset(hiddens | secrets)
106 return frozenset(hiddens | secrets)
107 else:
107 else:
108 return hiddens
108 return hiddens
109
109
110 def computemutable(repo, visibilityexceptions=None):
110 def computemutable(repo, visibilityexceptions=None):
111 assert not repo.changelog.filteredrevs
111 assert not repo.changelog.filteredrevs
112 # fast check to avoid revset call on huge repo
112 # fast check to avoid revset call on huge repo
113 if any(repo._phasecache.phaseroots[1:]):
113 if any(repo._phasecache.phaseroots[1:]):
114 getphase = repo._phasecache.phase
114 getphase = repo._phasecache.phase
115 maymutable = filterrevs(repo, 'base')
115 maymutable = filterrevs(repo, 'base')
116 return frozenset(r for r in maymutable if getphase(repo, r))
116 return frozenset(r for r in maymutable if getphase(repo, r))
117 return frozenset()
117 return frozenset()
118
118
119 def computeimpactable(repo, visibilityexceptions=None):
119 def computeimpactable(repo, visibilityexceptions=None):
120 """Everything impactable by mutable revision
120 """Everything impactable by mutable revision
121
121
122 The immutable filter still has some chance of being invalidated. This will
122 The immutable filter still has some chance of being invalidated. This will
123 happen when:
123 happen when:
124
124
125 - you garbage collect hidden changesets,
125 - you garbage collect hidden changesets,
126 - the public phase is moved backward,
126 - the public phase is moved backward,
127 - something is changed in the filtering (this could be fixed)
127 - something is changed in the filtering (this could be fixed)
128
128
129 This filters out any mutable changeset and any public changeset that may be
129 This filters out any mutable changeset and any public changeset that may be
130 impacted by something happening to a mutable revision.
130 impacted by something happening to a mutable revision.
131
131
132 This is achieved by filtering out everything with a revision number equal
132 This is achieved by filtering out everything with a revision number equal
133 to or higher than that of the first mutable changeset."""
133 to or higher than that of the first mutable changeset."""
134 assert not repo.changelog.filteredrevs
134 assert not repo.changelog.filteredrevs
135 cl = repo.changelog
135 cl = repo.changelog
136 firstmutable = len(cl)
136 firstmutable = len(cl)
137 for roots in repo._phasecache.phaseroots[1:]:
137 for roots in repo._phasecache.phaseroots[1:]:
138 if roots:
138 if roots:
139 firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
139 firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
140 # protect from nullrev root
140 # protect from nullrev root
141 firstmutable = max(0, firstmutable)
141 firstmutable = max(0, firstmutable)
142 return frozenset(pycompat.xrange(firstmutable, len(cl)))
142 return frozenset(pycompat.xrange(firstmutable, len(cl)))
143
143
144 # function to compute filtered set
144 # function to compute filtered set
145 #
145 #
146 # When adding a new filter you MUST update the table at:
146 # When adding a new filter you MUST update the table at:
147 # mercurial.branchmap.subsettable
147 # mercurial.utils.repoviewutil.subsettable
148 # Otherwise your filter will have to recompute all its branches cache
148 # Otherwise your filter will have to recompute all its branches cache
149 # from scratch (very slow).
149 # from scratch (very slow).
150 filtertable = {'visible': computehidden,
150 filtertable = {'visible': computehidden,
151 'visible-hidden': computehidden,
151 'visible-hidden': computehidden,
152 'served.hidden': computesecret,
152 'served.hidden': computesecret,
153 'served': computeunserved,
153 'served': computeunserved,
154 'immutable': computemutable,
154 'immutable': computemutable,
155 'base': computeimpactable}
155 'base': computeimpactable}
156
156
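The subsettable companion table makes the comment above actionable: when a filter's branchmap cache is missing, a branchmap-style consumer can walk the nearest-subset chain and seed the cache from the closest filtered view that has one. A hedged sketch, assuming the module layout introduced by this patch; candidatebases is a hypothetical helper, not part of Mercurial:

    from mercurial.utils import repoviewutil

    def candidatebases(filtername):
        # yield filter names whose cache could seed `filtername`, nearest first
        subset = repoviewutil.subsettable.get(filtername)
        while subset is not None:
            yield subset
            subset = repoviewutil.subsettable.get(subset)

    print(list(candidatebases('visible')))  # ['served', 'immutable', 'base']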
157 def filterrevs(repo, filtername, visibilityexceptions=None):
157 def filterrevs(repo, filtername, visibilityexceptions=None):
158 """returns set of filtered revision for this filter name
158 """returns set of filtered revision for this filter name
159
159
160 visibilityexceptions is a set of revs which must are exceptions for
160 visibilityexceptions is a set of revs which must are exceptions for
161 hidden-state and must be visible. They are dynamic and hence we should not
161 hidden-state and must be visible. They are dynamic and hence we should not
162 cache it's result"""
162 cache it's result"""
163 if filtername not in repo.filteredrevcache:
163 if filtername not in repo.filteredrevcache:
164 func = filtertable[filtername]
164 func = filtertable[filtername]
165 if visibilityexceptions:
165 if visibilityexceptions:
166             return func(repo.unfiltered(), visibilityexceptions)
166             return func(repo.unfiltered(), visibilityexceptions)
167 repo.filteredrevcache[filtername] = func(repo.unfiltered())
167 repo.filteredrevcache[filtername] = func(repo.unfiltered())
168 return repo.filteredrevcache[filtername]
168 return repo.filteredrevcache[filtername]
169
169
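A hedged usage sketch ('repo' is assumed to be a localrepository instance): plain calls compute the set once and serve it from repo.filteredrevcache afterwards, while calls with visibilityexceptions return a fresh, uncached set (note that the early return above is only reached while the filter name is absent from the cache).

    shown = filterrevs(repo, 'visible', visibilityexceptions={5})  # fresh set
    hidden = filterrevs(repo, 'visible')            # computed, then cached
    assert filterrevs(repo, 'visible') is hidden    # served from the cache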
170 class repoview(object):
170 class repoview(object):
171 """Provide a read/write view of a repo through a filtered changelog
171 """Provide a read/write view of a repo through a filtered changelog
172
172
173 This object is used to access a filtered version of a repository without
173 This object is used to access a filtered version of a repository without
174 altering the original repository object itself. We cannot alter the
174 altering the original repository object itself. We cannot alter the
175 original object for two main reasons:
175 original object for two main reasons:
176 - It prevents the use of a repo with multiple filters at the same time. In
176 - It prevents the use of a repo with multiple filters at the same time. In
177 particular when multiple threads are involved.
177 particular when multiple threads are involved.
178 - It makes the scope of the filtering harder to control.
178 - It makes the scope of the filtering harder to control.
179
179
180 This object behaves very much like the original repository. All attribute
180 This object behaves very much like the original repository. All attribute
181 operations are done on the original repository:
181 operations are done on the original repository:
182 - An access to `repoview.someattr` actually returns `repo.someattr`,
182 - An access to `repoview.someattr` actually returns `repo.someattr`,
183 - A write to `repoview.someattr` actually sets value of `repo.someattr`,
183 - A write to `repoview.someattr` actually sets value of `repo.someattr`,
184 - A deletion of `repoview.someattr` actually drops `someattr`
184 - A deletion of `repoview.someattr` actually drops `someattr`
185 from `repo.__dict__`.
185 from `repo.__dict__`.
186
186
187 The only exception is the `changelog` property. It is overridden to return
187 The only exception is the `changelog` property. It is overridden to return
188 a shallow copy of `repo.changelog` with some revisions filtered. The
188 a shallow copy of `repo.changelog` with some revisions filtered. The
189 `filtername` attribute of the view controls the revisions that need to be
189 `filtername` attribute of the view controls the revisions that need to be
190 filtered. (The fact that the changelog is copied is an implementation detail.)
190 filtered. (The fact that the changelog is copied is an implementation detail.)
191
191
192 Unlike attributes, this object intercepts all method calls. This means that
192 Unlike attributes, this object intercepts all method calls. This means that
193 all methods are run on the `repoview` object with the filtered `changelog`
193 all methods are run on the `repoview` object with the filtered `changelog`
194 property. For this purpose the simple `repoview` class must be mixed with
194 property. For this purpose the simple `repoview` class must be mixed with
195 the actual class of the repository. This ensures that the resulting
195 the actual class of the repository. This ensures that the resulting
196 `repoview` object has the very same methods as the repo object. This
196 `repoview` object has the very same methods as the repo object. This
197 leads to the property below.
197 leads to the property below.
198
198
199 repoview.method() --> repo.__class__.method(repoview)
199 repoview.method() --> repo.__class__.method(repoview)
200
200
201 The inheritance has to be done dynamically because `repo` can be of any
201 The inheritance has to be done dynamically because `repo` can be of any
202 subclass of `localrepo`, e.g. `bundlerepo` or `statichttprepo`.
202 subclass of `localrepo`, e.g. `bundlerepo` or `statichttprepo`.
203 """
203 """
204
204
205 def __init__(self, repo, filtername, visibilityexceptions=None):
205 def __init__(self, repo, filtername, visibilityexceptions=None):
206 object.__setattr__(self, r'_unfilteredrepo', repo)
206 object.__setattr__(self, r'_unfilteredrepo', repo)
207 object.__setattr__(self, r'filtername', filtername)
207 object.__setattr__(self, r'filtername', filtername)
208 object.__setattr__(self, r'_clcachekey', None)
208 object.__setattr__(self, r'_clcachekey', None)
209 object.__setattr__(self, r'_clcache', None)
209 object.__setattr__(self, r'_clcache', None)
210 # revs which are exceptions and must not be hidden
210 # revs which are exceptions and must not be hidden
211 object.__setattr__(self, r'_visibilityexceptions',
211 object.__setattr__(self, r'_visibilityexceptions',
212 visibilityexceptions)
212 visibilityexceptions)
213
213
214 # not a propertycache on purpose; we shall implement a proper cache later
214 # not a propertycache on purpose; we shall implement a proper cache later
215 @property
215 @property
216 def changelog(self):
216 def changelog(self):
217 """return a filtered version of the changeset
217 """return a filtered version of the changeset
218
218
219 this changelog must not be used for writing"""
219 this changelog must not be used for writing"""
220 # some cache may be implemented later
220 # some cache may be implemented later
221 unfi = self._unfilteredrepo
221 unfi = self._unfilteredrepo
222 unfichangelog = unfi.changelog
222 unfichangelog = unfi.changelog
223 # bypass call to changelog.method
223 # bypass call to changelog.method
224 unfiindex = unfichangelog.index
224 unfiindex = unfichangelog.index
225 unfilen = len(unfiindex)
225 unfilen = len(unfiindex)
226 unfinode = unfiindex[unfilen - 1][7]
226 unfinode = unfiindex[unfilen - 1][7]
227
227
228 revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
228 revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
229 cl = self._clcache
229 cl = self._clcache
230 newkey = (unfilen, unfinode, hash(revs), unfichangelog._delayed)
230 newkey = (unfilen, unfinode, hash(revs), unfichangelog._delayed)
231 # if cl.index is not unfiindex, unfi.changelog would be
231 # if cl.index is not unfiindex, unfi.changelog would be
232 # recreated, and our clcache would refer to a garbage object
232 # recreated, and our clcache would refer to a garbage object
233 if (cl is not None and
233 if (cl is not None and
234 (cl.index is not unfiindex or newkey != self._clcachekey)):
234 (cl.index is not unfiindex or newkey != self._clcachekey)):
235 cl = None
235 cl = None
236 # could have been made None by the previous if
236 # could have been made None by the previous if
237 if cl is None:
237 if cl is None:
238 cl = copy.copy(unfichangelog)
238 cl = copy.copy(unfichangelog)
239 cl.filteredrevs = revs
239 cl.filteredrevs = revs
240 object.__setattr__(self, r'_clcache', cl)
240 object.__setattr__(self, r'_clcache', cl)
241 object.__setattr__(self, r'_clcachekey', newkey)
241 object.__setattr__(self, r'_clcachekey', newkey)
242 return cl
242 return cl
243
243
244 def unfiltered(self):
244 def unfiltered(self):
245 """Return an unfiltered version of a repo"""
245 """Return an unfiltered version of a repo"""
246 return self._unfilteredrepo
246 return self._unfilteredrepo
247
247
248 def filtered(self, name, visibilityexceptions=None):
248 def filtered(self, name, visibilityexceptions=None):
249 """Return a filtered version of a repository"""
249 """Return a filtered version of a repository"""
250 if name == self.filtername and not visibilityexceptions:
250 if name == self.filtername and not visibilityexceptions:
251 return self
251 return self
252 return self.unfiltered().filtered(name, visibilityexceptions)
252 return self.unfiltered().filtered(name, visibilityexceptions)
253
253
254 def __repr__(self):
254 def __repr__(self):
255 return r'<%s:%s %r>' % (self.__class__.__name__,
255 return r'<%s:%s %r>' % (self.__class__.__name__,
256 pycompat.sysstr(self.filtername),
256 pycompat.sysstr(self.filtername),
257 self.unfiltered())
257 self.unfiltered())
258
258
259 # every attribute access is forwarded to the proxied repo
259 # every attribute access is forwarded to the proxied repo
260 def __getattr__(self, attr):
260 def __getattr__(self, attr):
261 return getattr(self._unfilteredrepo, attr)
261 return getattr(self._unfilteredrepo, attr)
262
262
263 def __setattr__(self, attr, value):
263 def __setattr__(self, attr, value):
264 return setattr(self._unfilteredrepo, attr, value)
264 return setattr(self._unfilteredrepo, attr, value)
265
265
266 def __delattr__(self, attr):
266 def __delattr__(self, attr):
267 return delattr(self._unfilteredrepo, attr)
267 return delattr(self._unfilteredrepo, attr)
268
268
269 # Python <3.4 easily leaks types via __mro__. See
269 # Python <3.4 easily leaks types via __mro__. See
270 # https://bugs.python.org/issue17950. We cache dynamically created types
270 # https://bugs.python.org/issue17950. We cache dynamically created types
271 # so they won't be leaked on every invocation of repo.filtered().
271 # so they won't be leaked on every invocation of repo.filtered().
272 _filteredrepotypes = weakref.WeakKeyDictionary()
272 _filteredrepotypes = weakref.WeakKeyDictionary()
273
273
274 def newtype(base):
274 def newtype(base):
275 """Create a new type with the repoview mixin and the given base class"""
275 """Create a new type with the repoview mixin and the given base class"""
276 if base not in _filteredrepotypes:
276 if base not in _filteredrepotypes:
277 class filteredrepo(repoview, base):
277 class filteredrepo(repoview, base):
278 pass
278 pass
279 _filteredrepotypes[base] = filteredrepo
279 _filteredrepotypes[base] = filteredrepo
280 return _filteredrepotypes[base]
280 return _filteredrepotypes[base]
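A minimal sketch of the dynamic mixin pattern (the stand-in base class is hypothetical): the returned type inherits from both repoview and the given class, and repeated calls with the same base hit the WeakKeyDictionary cache instead of creating a new type.

    class fakerepo(object):           # stand-in for a localrepository subclass
        pass

    cls = newtype(fakerepo)
    assert issubclass(cls, repoview) and issubclass(cls, fakerepo)
    assert newtype(fakerepo) is cls   # cached in _filteredrepotypes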
@@ -1,79 +1,79 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 #
2 #
3 # check-perf-code - (historical) portability checker for contrib/perf.py
3 # check-perf-code - (historical) portability checker for contrib/perf.py
4
4
5 from __future__ import absolute_import
5 from __future__ import absolute_import
6
6
7 import os
7 import os
8 import sys
8 import sys
9
9
10 # write static check patterns here
10 # write static check patterns here
11 perfpypats = [
11 perfpypats = [
12 [
12 [
13 (r'(branchmap|repoview)\.subsettable',
13 (r'(branchmap|repoview|repoviewutil)\.subsettable',
14 "use getbranchmapsubsettable() for early Mercurial"),
14 "use getbranchmapsubsettable() for early Mercurial"),
15 (r'\.(vfs|svfs|opener|sopener)',
15 (r'\.(vfs|svfs|opener|sopener)',
16 "use getvfs()/getsvfs() for early Mercurial"),
16 "use getvfs()/getsvfs() for early Mercurial"),
17 (r'ui\.configint',
17 (r'ui\.configint',
18 "use getint() instead of ui.configint() for early Mercurial"),
18 "use getint() instead of ui.configint() for early Mercurial"),
19 ],
19 ],
20 # warnings
20 # warnings
21 [
21 [
22 ]
22 ]
23 ]
23 ]
24
24
25 def modulewhitelist(names):
25 def modulewhitelist(names):
26 replacement = [('.py', ''), ('.c', ''), # trim suffix
26 replacement = [('.py', ''), ('.c', ''), # trim suffix
27 ('mercurial%s' % ('/'), ''), # trim "mercurial/" path
27 ('mercurial%s' % ('/'), ''), # trim "mercurial/" path
28 ]
28 ]
29 ignored = {'__init__'}
29 ignored = {'__init__'}
30 modules = {}
30 modules = {}
31
31
32 # convert from file name to module name, and count # of appearances
32 # convert from file name to module name, and count # of appearances
33 for name in names:
33 for name in names:
34 name = name.strip()
34 name = name.strip()
35 for old, new in replacement:
35 for old, new in replacement:
36 name = name.replace(old, new)
36 name = name.replace(old, new)
37 if name not in ignored:
37 if name not in ignored:
38 modules[name] = modules.get(name, 0) + 1
38 modules[name] = modules.get(name, 0) + 1
39
39
40 # list the module names which appear multiple times
40 # list the module names which appear multiple times
41 whitelist = []
41 whitelist = []
42 for name, count in modules.items():
42 for name, count in modules.items():
43 if count > 1:
43 if count > 1:
44 whitelist.append(name)
44 whitelist.append(name)
45
45
46 return whitelist
46 return whitelist
47
47
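A hedged illustration of the input/output contract (file names are made up): modulewhitelist() expects "hg files"-style lines on input, and whitelists only module names that appear for more than one revision.

    names = ['mercurial/branchmap.py\n',
             'mercurial/branchmap.py\n',   # listed at two revisions
             'mercurial/repoview.py\n']    # listed at only one
    print(modulewhitelist(names))          # ['branchmap']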
48 if __name__ == "__main__":
48 if __name__ == "__main__":
49 # in this case, it is assumed that the result of "hg files" at
49 # in this case, it is assumed that the result of "hg files" at
50 # multiple revisions is given via stdin
50 # multiple revisions is given via stdin
51 whitelist = modulewhitelist(sys.stdin)
51 whitelist = modulewhitelist(sys.stdin)
52 assert whitelist, "module whitelist is empty"
52 assert whitelist, "module whitelist is empty"
53
53
54 # build up module whitelist check from file names given at runtime
54 # build up module whitelist check from file names given at runtime
55 perfpypats[0].append(
55 perfpypats[0].append(
56 # this matching pattern assumes importing modules from
56 # this matching pattern assumes importing modules from
57 # "mercurial" package in the current style below, for simplicity
57 # "mercurial" package in the current style below, for simplicity
58 #
58 #
59 # from mercurial import (
59 # from mercurial import (
60 # foo,
60 # foo,
61 # bar,
61 # bar,
62 # baz
62 # baz
63 # )
63 # )
64 ((r'from mercurial import [(][a-z0-9, \n#]*\n(?! *%s,|^[ #]*\n|[)])'
64 ((r'from mercurial import [(][a-z0-9, \n#]*\n(?! *%s,|^[ #]*\n|[)])'
65 % ',| *'.join(whitelist)),
65 % ',| *'.join(whitelist)),
66 "import newer module separately in try clause for early Mercurial"
66 "import newer module separately in try clause for early Mercurial"
67 ))
67 ))
68
68
69 # import contrib/check-code.py as checkcode
69 # import contrib/check-code.py as checkcode
70 assert 'RUNTESTDIR' in os.environ, "use check-perf-code.py in *.t script"
70 assert 'RUNTESTDIR' in os.environ, "use check-perf-code.py in *.t script"
71 contribpath = os.path.join(os.environ['RUNTESTDIR'], '..', 'contrib')
71 contribpath = os.path.join(os.environ['RUNTESTDIR'], '..', 'contrib')
72 sys.path.insert(0, contribpath)
72 sys.path.insert(0, contribpath)
73 checkcode = __import__('check-code')
73 checkcode = __import__('check-code')
74
74
75 # register perf.py specific entry with "checks" in check-code.py
75 # register perf.py specific entry with "checks" in check-code.py
76 checkcode.checks.append(('perf.py', r'contrib/perf.py$', '',
76 checkcode.checks.append(('perf.py', r'contrib/perf.py$', '',
77 checkcode.pyfilters, perfpypats))
77 checkcode.pyfilters, perfpypats))
78
78
79 sys.exit(checkcode.main())
79 sys.exit(checkcode.main())