config: add experimental argument to the config registrar...
Navaneeth Suresh
r42987:e67d9b6b default draft
@@ -1,3063 +1,3068 @@
# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median, and average. If not set, only the best timing is reported
  (default: off).

``presleep``
  number of seconds to wait before any group of runs (default: 1)

``pre-run``
  number of runs to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic:

      If the benchmark has been running for <time> seconds and we have
      performed <numberofrun> iterations, stop the benchmark.

  The default value is: `3.0-100, 10.0-3`
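
  For example (illustrative values, not a recommendation): setting
  `run-limits` to `5.0-50, 30.0-5` stops a benchmark once 5 seconds have
  elapsed and at least 50 runs have completed, or once 30 seconds have
  elapsed and at least 5 runs have completed, whichever happens first.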

``stub``
  When set, benchmarks will only be run once, useful for testing
  (default: off)
'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial

from __future__ import absolute_import
import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap  # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete  # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar  # since 3.7 (or 37d50250b696)
    dir(registrar)  # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview  # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial.utils import repoviewutil  # since 5.0
except ImportError:
    repoviewutil = None
try:
    from mercurial import scmutil  # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery  # since 1.9 (or cb98fed52495)
except ImportError:
    pass

try:
    from mercurial import profiling
except ImportError:
    profiling = None

def identity(a):
    return a

try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
                         (b'c', b'changelog', False, (b'open changelog')),
                         (b'm', b'manifest', False, (b'open manifest')),
                         (b'', b'dir', False, (b'open directory manifest')),
                     ]))

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")

if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator

try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
except (ImportError, AttributeError):
    pass
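
# The options registered above are read from the `[perf]` section of a
# Mercurial configuration file. A minimal sketch (values are illustrative
# only, not recommendations):
#
#   [perf]
#   presleep = 2
#   all-timing = yes
#   stub = no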

def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len

class noop(object):
    """dummy context manager"""
    def __enter__(self):
        pass
    def __exit__(self, *args):
        pass

NOOPCTX = noop()

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(pycompat.sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        try:
            run_limit = int(pycompat.sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (pycompat.bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm

def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()

@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
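
# Illustrative use of timeone() (`work` is a hypothetical callable):
#
#   with timeone() as res:
#       work()
#   wall, user, sys_ = res[0]  # seconds spent in this single run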


# list of stop conditions (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)

def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)

def formatone(fm, timings, title=None, result=None, displayall=False):

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
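
# With the plain formatter, a line emitted by formatone() looks like this
# (the numbers are illustrative):
#
#   ! wall 0.008121 comb 0.010000 user 0.010000 sys 0.000000 (best of 100)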

# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has the 'name' attribute before subsequent setattr

    This function aborts if 'obj' doesn't have the 'name' attribute at
    runtime. This avoids overlooking a future removal of the attribute,
    which would silently break the assumptions of performance measurement.

    This function returns an object to (1) assign a new value to the
    attribute and (2) restore its original value.

    If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
    an abort, and this function returns None. This is useful for examining
    an attribute that isn't ensured in all Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
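
# Illustrative use of safeattrsetter(), mirroring how gettimer() redirects
# output (the restore step is hypothetical here):
#
#   fout = safeattrsetter(ui, b'fout', ignoremissing=True)
#   if fout:
#       fout.set(ui.ferr)  # temporarily point fout at stderr
#       ...
#       fout.restore()     # put the original stream back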

# utilities to examine each internal API changes

def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")

def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')

def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')

def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))

# utilities to clear cache

def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)

def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')

# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()

@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()

@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()

@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()

@command(b'perftags', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()

@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()

@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()

@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()

@command(b'perfbookmarks', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()

@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
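
# Usage sketch for perfbundleread (the bundle path is hypothetical):
#
#   $ hg bundle --all /tmp/repo.hg
#   $ hg perfbundleread /tmp/repo.hg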
863
868
864 @command(b'perfchangegroupchangelog', formatteropts +
869 @command(b'perfchangegroupchangelog', formatteropts +
865 [(b'', b'cgversion', b'02', b'changegroup version'),
870 [(b'', b'cgversion', b'02', b'changegroup version'),
866 (b'r', b'rev', b'', b'revisions to add to changegroup')])
871 (b'r', b'rev', b'', b'revisions to add to changegroup')])
867 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
872 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
868 """Benchmark producing a changelog group for a changegroup.
873 """Benchmark producing a changelog group for a changegroup.
869
874
870 This measures the time spent processing the changelog during a
875 This measures the time spent processing the changelog during a
871 bundle operation. This occurs during `hg bundle` and on a server
876 bundle operation. This occurs during `hg bundle` and on a server
872 processing a `getbundle` wire protocol request (handles clones
877 processing a `getbundle` wire protocol request (handles clones
873 and pull requests).
878 and pull requests).
874
879
875 By default, all revisions are added to the changegroup.
880 By default, all revisions are added to the changegroup.
876 """
881 """
877 opts = _byteskwargs(opts)
882 opts = _byteskwargs(opts)
878 cl = repo.changelog
883 cl = repo.changelog
879 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
884 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
880 bundler = changegroup.getbundler(cgversion, repo)
885 bundler = changegroup.getbundler(cgversion, repo)
881
886
882 def d():
887 def d():
883 state, chunks = bundler._generatechangelog(cl, nodes)
888 state, chunks = bundler._generatechangelog(cl, nodes)
884 for chunk in chunks:
889 for chunk in chunks:
885 pass
890 pass
886
891
887 timer, fm = gettimer(ui, opts)
892 timer, fm = gettimer(ui, opts)
888
893
889 # Terminal printing can interfere with timing. So disable it.
894 # Terminal printing can interfere with timing. So disable it.
890 with ui.configoverride({(b'progress', b'disable'): True}):
895 with ui.configoverride({(b'progress', b'disable'): True}):
891 timer(d)
896 timer(d)
892
897
893 fm.end()
898 fm.end()
894
899
895 @command(b'perfdirs', formatteropts)
900 @command(b'perfdirs', formatteropts)
896 def perfdirs(ui, repo, **opts):
901 def perfdirs(ui, repo, **opts):
897 opts = _byteskwargs(opts)
902 opts = _byteskwargs(opts)
898 timer, fm = gettimer(ui, opts)
903 timer, fm = gettimer(ui, opts)
899 dirstate = repo.dirstate
904 dirstate = repo.dirstate
900 b'a' in dirstate
905 b'a' in dirstate
901 def d():
906 def d():
902 dirstate.hasdir(b'a')
907 dirstate.hasdir(b'a')
903 del dirstate._map._dirs
908 del dirstate._map._dirs
904 timer(d)
909 timer(d)
905 fm.end()
910 fm.end()
906
911
907 @command(b'perfdirstate', formatteropts)
912 @command(b'perfdirstate', formatteropts)
908 def perfdirstate(ui, repo, **opts):
913 def perfdirstate(ui, repo, **opts):
909 opts = _byteskwargs(opts)
914 opts = _byteskwargs(opts)
910 timer, fm = gettimer(ui, opts)
915 timer, fm = gettimer(ui, opts)
911 b"a" in repo.dirstate
916 b"a" in repo.dirstate
912 def d():
917 def d():
913 repo.dirstate.invalidate()
918 repo.dirstate.invalidate()
914 b"a" in repo.dirstate
919 b"a" in repo.dirstate
915 timer(d)
920 timer(d)
916 fm.end()
921 fm.end()
917
922
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()

@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds
    def d():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()

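# Editorial note: hedged examples for the dirstate benchmarks above. The
# command names come from the @command registrations; enabling this file as
# the `perf` extension is assumed.
#
#   $ hg perfdirstate          # cold `b"a" in dirstate` lookups
#   $ hg perfdirstatedirs      # hasdir() with the _dirs cache dropped
#   $ hg perfdirstatewrite     # serializing the dirstate to disk
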
def _getmergerevs(repo, opts):
    """parse command arguments to return the revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)

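# Editorial note: a hedged illustration of the helper above. Given opts such
# as {b'rev': b'.', b'from': b'', b'base': b''} (the defaults declared by the
# merge commands below), it resolves to (wctx, rctx, ancestor), computing the
# ancestor via wctx.ancestor(rctx) when no explicit base is given.
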
@command(b'perfmergecalculate',
         [
          (b'r', b'rev', b'.', b'rev to merge against'),
          (b'', b'from', b'', b'rev to merge from'),
          (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecalculate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()

@command(b'perfmergecopies',
         [
          (b'r', b'rev', b'.', b'rev to merge against'),
          (b'', b'from', b'', b'rev to merge from'),
          (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)
    timer(d)
    fm.end()

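# Editorial note: hedged examples for the two merge benchmarks above, using
# only the flags they declare; the revisions named are illustrative.
#
#   $ hg perfmergecalculate --rev 4.9 --from 5.0
#   $ hg perfmergecopies --rev . --base 'ancestor(., default)'
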
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()

@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()

@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()

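# Editorial note: a hedged example for perfphasesremote; DEST is optional and
# falls back to the configured `default-push`/`default` path as coded above.
# The URL is illustrative.
#
#   $ hg perfphasesremote https://www.mercurial-scm.org/repo/hg
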
@command(b'perfmanifest', [
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()

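# Editorial note: hedged examples for perfmanifest, matching its REV|NODE
# argument and the options declared above.
#
#   $ hg perfmanifest tip
#   $ hg perfmanifest --clear-disk tip
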
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()

@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operations related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()

@command(b'perfindex', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'no-lookup', None, b'do not perform revision lookup post creation'),
    ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matter.

    Example of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, check out the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()

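# Editorial note: hedged invocations built from the revsets suggested in the
# perfindex docstring above.
#
#   $ hg perfindex --rev tip
#   $ hg perfindex --rev '-10000:' --rev 0
#   $ hg perfindex --no-lookup
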
@command(b'perfnodemap', [
    (b'', b'rev', [], b'revision to be looked up (default tip)'),
    (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the number and order of revisions we
    look up can vary. Example of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts['clear_caches']
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort('use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()

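# Editorial note: a hedged example for perfnodemap; --rev is mandatory, as
# enforced above, and --clear-caches defaults to True.
#
#   $ hg perfnodemap --rev tip --rev '-100:'
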
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()

@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object
    layers from the repository object. The first N revisions will be used
    for this benchmark. N is controlled by the ``perf.parentscount`` config
    option (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()

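# Editorial note: a hedged example for perfparents; perf.parentscount is the
# experimental knob documented above.
#
#   $ hg perfparents --config perf.parentscount=100
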
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()

@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.read(x)[3])
    timer(d)
    fm.end()

@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()

@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

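# Editorial note: a hedged example for perflinelogedits (registered with
# norepo=True above, so it can run outside any repository).
#
#   $ hg perflinelogedits -n 1000 --max-hunk-lines 5
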
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()

@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()

@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
          ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()

@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch()  # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()

@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
          ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()

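# Editorial note: hedged examples for perftemplating; the template argument is
# positional and falls back to the default template built above.
#
#   $ hg perftemplating -r '-1000:'
#   $ hg perftemplating -r '-1000:' '{rev}:{node|short} {desc|firstline}\n'
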
@command(b'perfhelper-mergecopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command finds (base, p1, p2) triplets relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
        ]
    if not dotiming:
        output_template = [i for i in output_template
                           if not ('time' in i[0] or 'renames' in i[0])]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first four are about renamed files, so let's count those.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()

@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfpathcopies`

    This command finds source-destination pairs relevant for copytracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()

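# Editorial note: hedged examples for the two helper commands above; --timing
# adds the rename-detection columns at the cost of a slower run.
#
#   $ hg perfhelper-pathcopies --revs '-10000:' --timing
#   $ hg perfhelper-mergecopies --revs '-10000:'
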
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()

@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()

@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()

@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()

def _bdiffworker(q, blocks, xdiff, ready, done):
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()

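# Editorial note on the worker protocol above: each worker drains (old, new)
# text pairs from the queue until it sees a None sentinel, calling
# q.task_done() for every item so q.join() in perfbdiff can block until a
# batch completes; it then parks on the `ready` condition, from which the
# main thread wakes it (ready.notify_all()) either to process another timed
# run or, once `done` is set, to exit.
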
def _manifestrevision(repo, mnode):
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)

1727 @command(b'perfbdiff', revlogopts + formatteropts + [
1732 @command(b'perfbdiff', revlogopts + formatteropts + [
1728 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1733 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1729 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1734 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1730 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1735 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1731 (b'', b'blocks', False, b'test computing diffs into blocks'),
1736 (b'', b'blocks', False, b'test computing diffs into blocks'),
1732 (b'', b'xdiff', False, b'use xdiff algorithm'),
1737 (b'', b'xdiff', False, b'use xdiff algorithm'),
1733 ],
1738 ],
1734
1739
1735 b'-c|-m|FILE REV')
1740 b'-c|-m|FILE REV')
1736 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1741 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1737 """benchmark a bdiff between revisions
1742 """benchmark a bdiff between revisions
1738
1743
1739 By default, benchmark a bdiff between its delta parent and itself.
1744 By default, benchmark a bdiff between its delta parent and itself.
1740
1745
1741 With ``--count``, benchmark bdiffs between delta parents and self for N
1746 With ``--count``, benchmark bdiffs between delta parents and self for N
1742 revisions starting at the specified revision.
1747 revisions starting at the specified revision.
1743
1748
1744 With ``--alldata``, assume the requested revision is a changeset and
1749 With ``--alldata``, assume the requested revision is a changeset and
1745 measure bdiffs for all changes related to that changeset (manifest
1750 measure bdiffs for all changes related to that changeset (manifest
1746 and filelogs).
1751 and filelogs).
1747 """
1752 """
1748 opts = _byteskwargs(opts)
1753 opts = _byteskwargs(opts)
1749
1754
1750 if opts[b'xdiff'] and not opts[b'blocks']:
1755 if opts[b'xdiff'] and not opts[b'blocks']:
1751 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1756 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1752
1757
1753 if opts[b'alldata']:
1758 if opts[b'alldata']:
1754 opts[b'changelog'] = True
1759 opts[b'changelog'] = True
1755
1760
1756 if opts.get(b'changelog') or opts.get(b'manifest'):
1761 if opts.get(b'changelog') or opts.get(b'manifest'):
1757 file_, rev = None, file_
1762 file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()

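# A hedged usage sketch for `perfbdiff` above. The revision numbers are
# illustrative only, and the long option spellings are inferred from the
# opts keys used in the body (e.g. --alldata, --xdiff) and from the
# `threads` parameter:
#
#   $ hg perfbdiff -m 10000 --count 10
#   $ hg perfbdiff -c 10000 --alldata --xdiff
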
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between the given revision and its delta
    parent.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

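# A hedged usage sketch for `perfunidiff`; as with perfbdiff, the revision
# number is illustrative only:
#
#   $ hg perfunidiff -c 10000 --count 50
#   $ hg perfunidiff -m 10000 --alldata
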
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()

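# For reference, `perfdiffwd` above reports one timing per whitespace-option
# combination, titled `diffopts: none`, `-w`, `-b`, `-B` and `-wB`.
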
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener') # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

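# A hedged usage sketch for `perfrevlogindex`; any revlog reachable through
# -c, -m or a tracked file path can be probed:
#
#   $ hg perfrevlogindex -c
#   $ hg perfrevlogindex -m
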
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old Mercurial versions don't support passing an int here.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

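# A hedged usage sketch for `perfrevlogrevisions` (distance and start
# revision are illustrative; a negative --startrev counts from the tip):
#
#   $ hg perfrevlogrevisions -m --dist 10
#   $ hg perfrevlogrevisions -c --startrev -1000 --reverse
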
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revision tested'),
          (b'', b'source', b'full', b'the kind of data fed into the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This command measures performance in a custom way. As a result some
    of the global configuration of the 'perf' command does not apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled, use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many floats will not be very precise, we ignore this
    # fact for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),))
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()

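# A hedged usage sketch for `perfrevlogwrite` (start revision and pass count
# are illustrative):
#
#   $ hg perfrevlogwrite -m --startrev -1000 --source parent-smallest
#   $ hg perfrevlogwrite -c --count 5 --details
#
# The stub transaction below exists because addrawrevision() expects a
# transaction object but, for the non-inline revlogs this benchmark uses,
# only ever calls its add() method, so a no-op implementation is enough.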
class _faketr(object):
    def add(s, x, y, z=None):
        return None

def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings

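# Note: each entry recorded above is (rev, times) where, going by the
# timeone() helper defined earlier in this file, `times` holds the wall-clock
# delta followed by the user and system CPU deltas; that is why
# perfrevlogwrite sums the three components separately when totaling.
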
def _getrevisionseed(orig, rev, tr, source):
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})

@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('recreating revlog from the truncated copy\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname, **revlogkwargs)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)

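# _temprevlog is consumed as a context manager, e.g. in _timeonewrite above:
#
#   with _temprevlog(ui, orig, startrev) as dest:
#       dest.addrawrevision(*addargs, **addkwargs)
#
# so the temporary directory is removed even when a benchmark pass fails.
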
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression
    performance. For measurements of higher-level operations like resolving
    revisions, see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

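# A hedged usage sketch for `perfrevlogchunks`; engine availability depends
# on the build, but `zlib` and `none` are expected to be registered:
#
#   $ hg perfrevlogchunks -c
#   $ hg perfrevlogchunks -m -e 'zlib,none'
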
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()

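# A hedged usage sketch for `perfrevlogrevision` (the revision is
# illustrative; deep delta chains make the per-phase split more interesting):
#
#   $ hg perfrevlogrevision -m 10000
#   $ hg perfrevlogrevision --cache -c 10000
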
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on revset execution. Volatile caches
    hold filtering- and obsolescence-related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass
    timer(d)
    fm.end()

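# A hedged usage sketch for `perfrevset`; any revset expression works:
#
#   $ hg perfrevset 'all()'
#   $ hg perfrevset 'draft()' --clear --contexts
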
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
         ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets compute elements related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()

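# A hedged usage sketch for `perfvolatilesets`; names are matched against
# both obsolete.cachefuncs and repoview.filtertable, so e.g. `obsolete` and
# `visible` are valid arguments:
#
#   $ hg perfvolatilesets
#   $ hg perfvolatilesets obsolete visible --clear-obsstore
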
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
         ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write
    disabled.
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()

2723 @command(b'perfbranchmapupdate', [
2728 @command(b'perfbranchmapupdate', [
2724 (b'', b'base', [], b'subset of revision to start from'),
2729 (b'', b'base', [], b'subset of revision to start from'),
2725 (b'', b'target', [], b'subset of revision to end with'),
2730 (b'', b'target', [], b'subset of revision to end with'),
2726 (b'', b'clear-caches', False, b'clear caches between each run')
2731 (b'', b'clear-caches', False, b'clear caches between each run')
2727 ] + formatteropts)
2732 ] + formatteropts)
2728 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2733 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2729 """benchmark branchmap update from for <base> revs to <target> revs
2734 """benchmark branchmap update from for <base> revs to <target> revs
2730
2735
2731 If `--clear-caches` is passed, the following items will be reset before
2736 If `--clear-caches` is passed, the following items will be reset before
2732 each update:
2737 each update:
2733 * the changelog instance and associated indexes
2738 * the changelog instance and associated indexes
2734 * the rev-branch-cache instance
2739 * the rev-branch-cache instance
2735
2740
2736 Examples:
2741 Examples:
2737
2742
2738 # update for the last revision only
2743 # update for the last revision only
2739 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2744 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2740
2745
2741 # update for a change coming with a new branch
2746 # update for a change coming with a new branch
2742 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2747 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2743 """
2748 """
2744 from mercurial import branchmap
2749 from mercurial import branchmap
2745 from mercurial import repoview
2750 from mercurial import repoview
2746 opts = _byteskwargs(opts)
2751 opts = _byteskwargs(opts)
2747 timer, fm = gettimer(ui, opts)
2752 timer, fm = gettimer(ui, opts)
2748 clearcaches = opts[b'clear_caches']
2753 clearcaches = opts[b'clear_caches']
2749 unfi = repo.unfiltered()
2754 unfi = repo.unfiltered()
2750 x = [None] # used to pass data between closures
2755 x = [None] # used to pass data between closures
2751
2756
2752 # we use a `list` here to avoid possible side effects from smartset
2757 # we use a `list` here to avoid possible side effects from smartset
2753 baserevs = list(scmutil.revrange(repo, base))
2758 baserevs = list(scmutil.revrange(repo, base))
2754 targetrevs = list(scmutil.revrange(repo, target))
2759 targetrevs = list(scmutil.revrange(repo, target))
2755 if not baserevs:
2760 if not baserevs:
2756 raise error.Abort(b'no revisions selected for --base')
2761 raise error.Abort(b'no revisions selected for --base')
2757 if not targetrevs:
2762 if not targetrevs:
2758 raise error.Abort(b'no revisions selected for --target')
2763 raise error.Abort(b'no revisions selected for --target')
2759
2764
2760 # make sure the target branchmap also contains the one in the base
2765 # make sure the target branchmap also contains the one in the base
2761 targetrevs = list(set(baserevs) | set(targetrevs))
2766 targetrevs = list(set(baserevs) | set(targetrevs))
2762 targetrevs.sort()
2767 targetrevs.sort()
2763
2768
2764 cl = repo.changelog
2769 cl = repo.changelog
2765 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2770 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2766 allbaserevs.sort()
2771 allbaserevs.sort()
2767 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2772 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2768
2773
2769 newrevs = list(alltargetrevs.difference(allbaserevs))
2774 newrevs = list(alltargetrevs.difference(allbaserevs))
2770 newrevs.sort()
2775 newrevs.sort()
2771
2776
2772 allrevs = frozenset(unfi.changelog.revs())
2777 allrevs = frozenset(unfi.changelog.revs())
2773 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2778 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2774 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2779 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2775
2780
2776 def basefilter(repo, visibilityexceptions=None):
2781 def basefilter(repo, visibilityexceptions=None):
2777 return basefilterrevs
2782 return basefilterrevs
2778
2783
2779 def targetfilter(repo, visibilityexceptions=None):
2784 def targetfilter(repo, visibilityexceptions=None):
2780 return targetfilterrevs
2785 return targetfilterrevs
2781
2786
2782 msg = b'benchmark of branchmap with %d revisions, %d of them new\n'
2787 msg = b'benchmark of branchmap with %d revisions, %d of them new\n'
2783 ui.status(msg % (len(allbaserevs), len(newrevs)))
2788 ui.status(msg % (len(allbaserevs), len(newrevs)))
2784 if targetfilterrevs:
2789 if targetfilterrevs:
2785 msg = b'(%d revisions still filtered)\n'
2790 msg = b'(%d revisions still filtered)\n'
2786 ui.status(msg % len(targetfilterrevs))
2791 ui.status(msg % len(targetfilterrevs))
2787
2792
2788 try:
2793 try:
2789 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2794 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2790 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2795 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2791
2796
2792 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2797 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2793 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2798 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2794
2799
2795 # try to find an existing branchmap to reuse
2800 # try to find an existing branchmap to reuse
2796 subsettable = getbranchmapsubsettable()
2801 subsettable = getbranchmapsubsettable()
2797 candidatefilter = subsettable.get(None)
2802 candidatefilter = subsettable.get(None)
2798 while candidatefilter is not None:
2803 while candidatefilter is not None:
2799 candidatebm = repo.filtered(candidatefilter).branchmap()
2804 candidatebm = repo.filtered(candidatefilter).branchmap()
2800 if candidatebm.validfor(baserepo):
2805 if candidatebm.validfor(baserepo):
2801 filtered = repoview.filterrevs(repo, candidatefilter)
2806 filtered = repoview.filterrevs(repo, candidatefilter)
2802 missing = [r for r in allbaserevs if r in filtered]
2807 missing = [r for r in allbaserevs if r in filtered]
2803 base = candidatebm.copy()
2808 base = candidatebm.copy()
2804 base.update(baserepo, missing)
2809 base.update(baserepo, missing)
2805 break
2810 break
2806 candidatefilter = subsettable.get(candidatefilter)
2811 candidatefilter = subsettable.get(candidatefilter)
2807 else:
2812 else:
2808 # no suitable subset was found
2813 # no suitable subset was found
2809 base = branchmap.branchcache()
2814 base = branchmap.branchcache()
2810 base.update(baserepo, allbaserevs)
2815 base.update(baserepo, allbaserevs)
2811
2816
2812 def setup():
2817 def setup():
2813 x[0] = base.copy()
2818 x[0] = base.copy()
2814 if clearcaches:
2819 if clearcaches:
2815 unfi._revbranchcache = None
2820 unfi._revbranchcache = None
2816 clearchangelog(repo)
2821 clearchangelog(repo)
2817
2822
2818 def bench():
2823 def bench():
2819 x[0].update(targetrepo, newrevs)
2824 x[0].update(targetrepo, newrevs)
2820
2825
2821 timer(bench, setup=setup)
2826 timer(bench, setup=setup)
2822 fm.end()
2827 fm.end()
2823 finally:
2828 finally:
2824 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2829 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2825 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2830 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2826
2831
2827 @command(b'perfbranchmapload', [
2832 @command(b'perfbranchmapload', [
2828 (b'f', b'filter', b'', b'Specify repoview filter'),
2833 (b'f', b'filter', b'', b'Specify repoview filter'),
2829 (b'', b'list', False, b'List branchmap filter caches'),
2834 (b'', b'list', False, b'List branchmap filter caches'),
2830 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2835 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2831
2836
2832 ] + formatteropts)
2837 ] + formatteropts)
2833 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2838 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2834 """benchmark reading the branchmap"""
2839 """benchmark reading the branchmap"""
2835 opts = _byteskwargs(opts)
2840 opts = _byteskwargs(opts)
2836 clearrevlogs = opts[b'clear_revlogs']
2841 clearrevlogs = opts[b'clear_revlogs']
2837
2842
2838 if list:
2843 if list:
2839 for name, kind, st in repo.cachevfs.readdir(stat=True):
2844 for name, kind, st in repo.cachevfs.readdir(stat=True):
2840 if name.startswith(b'branch2'):
2845 if name.startswith(b'branch2'):
2841 filtername = name.partition(b'-')[2] or b'unfiltered'
2846 filtername = name.partition(b'-')[2] or b'unfiltered'
2842 ui.status(b'%s - %s\n'
2847 ui.status(b'%s - %s\n'
2843 % (filtername, util.bytecount(st.st_size)))
2848 % (filtername, util.bytecount(st.st_size)))
2844 return
2849 return
2845 if not filter:
2850 if not filter:
2846 filter = None
2851 filter = None
2847 subsettable = getbranchmapsubsettable()
2852 subsettable = getbranchmapsubsettable()
2848 if filter is None:
2853 if filter is None:
2849 repo = repo.unfiltered()
2854 repo = repo.unfiltered()
2850 else:
2855 else:
2851 repo = repoview.repoview(repo, filter)
2856 repo = repoview.repoview(repo, filter)
2852
2857
2853 repo.branchmap() # make sure we have a relevant, up-to-date branchmap
2858 repo.branchmap() # make sure we have a relevant, up-to-date branchmap
2854
2859
2855 try:
2860 try:
2856 fromfile = branchmap.branchcache.fromfile
2861 fromfile = branchmap.branchcache.fromfile
2857 except AttributeError:
2862 except AttributeError:
2858 # older versions
2863 # older versions
2859 fromfile = branchmap.read
2864 fromfile = branchmap.read
2860
2865
2861 currentfilter = filter
2866 currentfilter = filter
2862 # try once without the timer; the filter may not be cached
2867 # try once without the timer; the filter may not be cached
2863 while fromfile(repo) is None:
2868 while fromfile(repo) is None:
2864 currentfilter = subsettable.get(currentfilter)
2869 currentfilter = subsettable.get(currentfilter)
2865 if currentfilter is None:
2870 if currentfilter is None:
2866 raise error.Abort(b'No branchmap cached for %s repo'
2871 raise error.Abort(b'No branchmap cached for %s repo'
2867 % (filter or b'unfiltered'))
2872 % (filter or b'unfiltered'))
2868 repo = repo.filtered(currentfilter)
2873 repo = repo.filtered(currentfilter)
2869 timer, fm = gettimer(ui, opts)
2874 timer, fm = gettimer(ui, opts)
2870 def setup():
2875 def setup():
2871 if clearrevlogs:
2876 if clearrevlogs:
2872 clearchangelog(repo)
2877 clearchangelog(repo)
2873 def bench():
2878 def bench():
2874 fromfile(repo)
2879 fromfile(repo)
2875 timer(bench, setup=setup)
2880 timer(bench, setup=setup)
2876 fm.end()
2881 fm.end()
2877
2882
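# A usage sketch for perfbranchmapload; filter names come from
# repoview.filtertable, and 'served' is used below purely as an illustration:
#
#   $ hg perfbranchmapload --list
#   $ hg perfbranchmapload --filter served --clear-revlogs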
2878 @command(b'perfloadmarkers')
2883 @command(b'perfloadmarkers')
2879 def perfloadmarkers(ui, repo):
2884 def perfloadmarkers(ui, repo):
2880 """benchmark the time to parse the on-disk markers for a repo
2885 """benchmark the time to parse the on-disk markers for a repo
2881
2886
2882 Result is the number of markers in the repo."""
2887 Result is the number of markers in the repo."""
2883 timer, fm = gettimer(ui)
2888 timer, fm = gettimer(ui)
2884 svfs = getsvfs(repo)
2889 svfs = getsvfs(repo)
2885 timer(lambda: len(obsolete.obsstore(svfs)))
2890 timer(lambda: len(obsolete.obsstore(svfs)))
2886 fm.end()
2891 fm.end()
2887
2892
2888 @command(b'perflrucachedict', formatteropts +
2893 @command(b'perflrucachedict', formatteropts +
2889 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2894 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2890 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2895 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2891 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2896 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2892 (b'', b'size', 4, b'size of cache'),
2897 (b'', b'size', 4, b'size of cache'),
2893 (b'', b'gets', 10000, b'number of key lookups'),
2898 (b'', b'gets', 10000, b'number of key lookups'),
2894 (b'', b'sets', 10000, b'number of key sets'),
2899 (b'', b'sets', 10000, b'number of key sets'),
2895 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2900 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2896 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2901 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2897 norepo=True)
2902 norepo=True)
2898 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2903 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2899 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2904 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2900 opts = _byteskwargs(opts)
2905 opts = _byteskwargs(opts)
2901
2906
2902 def doinit():
2907 def doinit():
2903 for i in _xrange(10000):
2908 for i in _xrange(10000):
2904 util.lrucachedict(size)
2909 util.lrucachedict(size)
2905
2910
2906 costrange = list(range(mincost, maxcost + 1))
2911 costrange = list(range(mincost, maxcost + 1))
2907
2912
2908 values = []
2913 values = []
2909 for i in _xrange(size):
2914 for i in _xrange(size):
2910 values.append(random.randint(0, _maxint))
2915 values.append(random.randint(0, _maxint))
2911
2916
2912 # Get mode fills the cache and tests raw lookup performance with no
2917 # Get mode fills the cache and tests raw lookup performance with no
2913 # eviction.
2918 # eviction.
2914 getseq = []
2919 getseq = []
2915 for i in _xrange(gets):
2920 for i in _xrange(gets):
2916 getseq.append(random.choice(values))
2921 getseq.append(random.choice(values))
2917
2922
2918 def dogets():
2923 def dogets():
2919 d = util.lrucachedict(size)
2924 d = util.lrucachedict(size)
2920 for v in values:
2925 for v in values:
2921 d[v] = v
2926 d[v] = v
2922 for key in getseq:
2927 for key in getseq:
2923 value = d[key]
2928 value = d[key]
2924 value # silence pyflakes warning
2929 value # silence pyflakes warning
2925
2930
2926 def dogetscost():
2931 def dogetscost():
2927 d = util.lrucachedict(size, maxcost=costlimit)
2932 d = util.lrucachedict(size, maxcost=costlimit)
2928 for i, v in enumerate(values):
2933 for i, v in enumerate(values):
2929 d.insert(v, v, cost=costs[i])
2934 d.insert(v, v, cost=costs[i])
2930 for key in getseq:
2935 for key in getseq:
2931 try:
2936 try:
2932 value = d[key]
2937 value = d[key]
2933 value # silence pyflakes warning
2938 value # silence pyflakes warning
2934 except KeyError:
2939 except KeyError:
2935 pass
2940 pass
2936
2941
2937 # Set mode tests insertion speed with cache eviction.
2942 # Set mode tests insertion speed with cache eviction.
2938 setseq = []
2943 setseq = []
2939 costs = []
2944 costs = []
2940 for i in _xrange(sets):
2945 for i in _xrange(sets):
2941 setseq.append(random.randint(0, _maxint))
2946 setseq.append(random.randint(0, _maxint))
2942 costs.append(random.choice(costrange))
2947 costs.append(random.choice(costrange))
2943
2948
2944 def doinserts():
2949 def doinserts():
2945 d = util.lrucachedict(size)
2950 d = util.lrucachedict(size)
2946 for v in setseq:
2951 for v in setseq:
2947 d.insert(v, v)
2952 d.insert(v, v)
2948
2953
2949 def doinsertscost():
2954 def doinsertscost():
2950 d = util.lrucachedict(size, maxcost=costlimit)
2955 d = util.lrucachedict(size, maxcost=costlimit)
2951 for i, v in enumerate(setseq):
2956 for i, v in enumerate(setseq):
2952 d.insert(v, v, cost=costs[i])
2957 d.insert(v, v, cost=costs[i])
2953
2958
2954 def dosets():
2959 def dosets():
2955 d = util.lrucachedict(size)
2960 d = util.lrucachedict(size)
2956 for v in setseq:
2961 for v in setseq:
2957 d[v] = v
2962 d[v] = v
2958
2963
2959 # Mixed mode randomly performs gets and sets with eviction.
2964 # Mixed mode randomly performs gets and sets with eviction.
2960 mixedops = []
2965 mixedops = []
2961 for i in _xrange(mixed):
2966 for i in _xrange(mixed):
2962 r = random.randint(0, 100)
2967 r = random.randint(0, 100)
2963 if r < mixedgetfreq:
2968 if r < mixedgetfreq:
2964 op = 0
2969 op = 0
2965 else:
2970 else:
2966 op = 1
2971 op = 1
2967
2972
2968 mixedops.append((op,
2973 mixedops.append((op,
2969 random.randint(0, size * 2),
2974 random.randint(0, size * 2),
2970 random.choice(costrange)))
2975 random.choice(costrange)))
2971
2976
2972 def domixed():
2977 def domixed():
2973 d = util.lrucachedict(size)
2978 d = util.lrucachedict(size)
2974
2979
2975 for op, v, cost in mixedops:
2980 for op, v, cost in mixedops:
2976 if op == 0:
2981 if op == 0:
2977 try:
2982 try:
2978 d[v]
2983 d[v]
2979 except KeyError:
2984 except KeyError:
2980 pass
2985 pass
2981 else:
2986 else:
2982 d[v] = v
2987 d[v] = v
2983
2988
2984 def domixedcost():
2989 def domixedcost():
2985 d = util.lrucachedict(size, maxcost=costlimit)
2990 d = util.lrucachedict(size, maxcost=costlimit)
2986
2991
2987 for op, v, cost in mixedops:
2992 for op, v, cost in mixedops:
2988 if op == 0:
2993 if op == 0:
2989 try:
2994 try:
2990 d[v]
2995 d[v]
2991 except KeyError:
2996 except KeyError:
2992 pass
2997 pass
2993 else:
2998 else:
2994 d.insert(v, v, cost=cost)
2999 d.insert(v, v, cost=cost)
2995
3000
2996 benches = [
3001 benches = [
2997 (doinit, b'init'),
3002 (doinit, b'init'),
2998 ]
3003 ]
2999
3004
3000 if costlimit:
3005 if costlimit:
3001 benches.extend([
3006 benches.extend([
3002 (dogetscost, b'gets w/ cost limit'),
3007 (dogetscost, b'gets w/ cost limit'),
3003 (doinsertscost, b'inserts w/ cost limit'),
3008 (doinsertscost, b'inserts w/ cost limit'),
3004 (domixedcost, b'mixed w/ cost limit'),
3009 (domixedcost, b'mixed w/ cost limit'),
3005 ])
3010 ])
3006 else:
3011 else:
3007 benches.extend([
3012 benches.extend([
3008 (dogets, b'gets'),
3013 (dogets, b'gets'),
3009 (doinserts, b'inserts'),
3014 (doinserts, b'inserts'),
3010 (dosets, b'sets'),
3015 (dosets, b'sets'),
3011 (domixed, b'mixed')
3016 (domixed, b'mixed')
3012 ])
3017 ])
3013
3018
3014 for fn, title in benches:
3019 for fn, title in benches:
3015 timer, fm = gettimer(ui, opts)
3020 timer, fm = gettimer(ui, opts)
3016 timer(fn, title=title)
3021 timer(fn, title=title)
3017 fm.end()
3022 fm.end()
3018
3023
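# A usage sketch for perflrucachedict; the cache size and operation counts
# below are arbitrary illustrations, not recommended values:
#
#   $ hg perflrucachedict --size 64 --gets 50000 --sets 50000
#   $ hg perflrucachedict --size 64 --costlimit 500 --mixed 100000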
3019 @command(b'perfwrite', formatteropts)
3024 @command(b'perfwrite', formatteropts)
3020 def perfwrite(ui, repo, **opts):
3025 def perfwrite(ui, repo, **opts):
3021 """microbenchmark ui.write
3026 """microbenchmark ui.write
3022 """
3027 """
3023 opts = _byteskwargs(opts)
3028 opts = _byteskwargs(opts)
3024
3029
3025 timer, fm = gettimer(ui, opts)
3030 timer, fm = gettimer(ui, opts)
3026 def write():
3031 def write():
3027 for i in range(100000):
3032 for i in range(100000):
3028 ui.write(b'Testing write performance\n')
3033 ui.write(b'Testing write performance\n')
3029 timer(write)
3034 timer(write)
3030 fm.end()
3035 fm.end()
3031
3036
3032 def uisetup(ui):
3037 def uisetup(ui):
3033 if (util.safehasattr(cmdutil, b'openrevlog') and
3038 if (util.safehasattr(cmdutil, b'openrevlog') and
3034 not util.safehasattr(commands, b'debugrevlogopts')):
3039 not util.safehasattr(commands, b'debugrevlogopts')):
3035 # for "historical portability":
3040 # for "historical portability":
3036 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3041 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3037 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3042 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3038 # openrevlog() should cause failure, because it has been
3043 # openrevlog() should cause failure, because it has been
3039 # available since 3.5 (or 49c583ca48c4).
3044 # available since 3.5 (or 49c583ca48c4).
3040 def openrevlog(orig, repo, cmd, file_, opts):
3045 def openrevlog(orig, repo, cmd, file_, opts):
3041 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3046 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3042 raise error.Abort(b"This version doesn't support --dir option",
3047 raise error.Abort(b"This version doesn't support --dir option",
3043 hint=b"use 3.5 or later")
3048 hint=b"use 3.5 or later")
3044 return orig(repo, cmd, file_, opts)
3049 return orig(repo, cmd, file_, opts)
3045 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3050 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3046
3051
3047 @command(b'perfprogress', formatteropts + [
3052 @command(b'perfprogress', formatteropts + [
3048 (b'', b'topic', b'topic', b'topic for progress messages'),
3053 (b'', b'topic', b'topic', b'topic for progress messages'),
3049 (b'c', b'total', 1000000, b'total value we are progressing to'),
3054 (b'c', b'total', 1000000, b'total value we are progressing to'),
3050 ], norepo=True)
3055 ], norepo=True)
3051 def perfprogress(ui, topic=None, total=None, **opts):
3056 def perfprogress(ui, topic=None, total=None, **opts):
3052 """printing of progress bars"""
3057 """printing of progress bars"""
3053 opts = _byteskwargs(opts)
3058 opts = _byteskwargs(opts)
3054
3059
3055 timer, fm = gettimer(ui, opts)
3060 timer, fm = gettimer(ui, opts)
3056
3061
3057 def doprogress():
3062 def doprogress():
3058 with ui.makeprogress(topic, total=total) as progress:
3063 with ui.makeprogress(topic, total=total) as progress:
3059 for i in pycompat.xrange(total):
3064 for i in pycompat.xrange(total):
3060 progress.increment()
3065 progress.increment()
3061
3066
3062 timer(doprogress)
3067 timer(doprogress)
3063 fm.end()
3068 fm.end()
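# A usage sketch for perfprogress; a smaller --total gives a quicker but
# noisier measurement (the values below are illustrative):
#
#   $ hg perfprogress --topic files --total 100000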
@@ -1,850 +1,851 b''
1 # __init__.py - fsmonitor initialization and overrides
1 # __init__.py - fsmonitor initialization and overrides
2 #
2 #
3 # Copyright 2013-2016 Facebook, Inc.
3 # Copyright 2013-2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''Faster status operations with the Watchman file monitor (EXPERIMENTAL)
8 '''Faster status operations with the Watchman file monitor (EXPERIMENTAL)
9
9
10 Integrates the file-watching program Watchman with Mercurial to produce faster
10 Integrates the file-watching program Watchman with Mercurial to produce faster
11 status results.
11 status results.
12
12
13 On a particular Linux system, for a real-world repository with over 400,000
13 On a particular Linux system, for a real-world repository with over 400,000
14 files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same
14 files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same
15 system, with fsmonitor it takes about 0.3 seconds.
15 system, with fsmonitor it takes about 0.3 seconds.
16
16
17 fsmonitor requires no configuration -- it will tell Watchman about your
17 fsmonitor requires no configuration -- it will tell Watchman about your
18 repository as necessary. You'll need to install Watchman from
18 repository as necessary. You'll need to install Watchman from
19 https://facebook.github.io/watchman/ and make sure it is in your PATH.
19 https://facebook.github.io/watchman/ and make sure it is in your PATH.
20
20
21 fsmonitor is incompatible with the largefiles and eol extensions, and
21 fsmonitor is incompatible with the largefiles and eol extensions, and
22 will disable itself if any of those are active.
22 will disable itself if any of those are active.
23
23
24 The following configuration options exist:
24 The following configuration options exist:
25
25
26 ::
26 ::
27
27
28 [fsmonitor]
28 [fsmonitor]
29 mode = {off, on, paranoid}
29 mode = {off, on, paranoid}
30
30
31 When `mode = off`, fsmonitor will disable itself (similar to not loading the
31 When `mode = off`, fsmonitor will disable itself (similar to not loading the
32 extension at all). When `mode = on`, fsmonitor will be enabled (the default).
32 extension at all). When `mode = on`, fsmonitor will be enabled (the default).
33 When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem,
33 When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem,
34 and ensure that the results are consistent.
34 and ensure that the results are consistent.
35
35
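For example, to double-check Watchman against a regular filesystem walk while
evaluating fsmonitor on a new setup, one could use::

    [fsmonitor]
    mode = paranoid
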
36 ::
36 ::
37
37
38 [fsmonitor]
38 [fsmonitor]
39 timeout = (float)
39 timeout = (float)
40
40
41 A value, in seconds, that determines how long fsmonitor will wait for Watchman
41 A value, in seconds, that determines how long fsmonitor will wait for Watchman
42 to return results. Defaults to `2.0`.
42 to return results. Defaults to `2.0`.
43
43
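For example, on a heavily loaded machine one might give Watchman more time
before falling back to a regular status::

    [fsmonitor]
    timeout = 10.0
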
44 ::
44 ::
45
45
46 [fsmonitor]
46 [fsmonitor]
47 blacklistusers = (list of userids)
47 blacklistusers = (list of userids)
48
48
49 A list of usernames for which fsmonitor will disable itself altogether.
49 A list of usernames for which fsmonitor will disable itself altogether.
50
50
51 ::
51 ::
52
52
53 [fsmonitor]
53 [fsmonitor]
54 walk_on_invalidate = (boolean)
54 walk_on_invalidate = (boolean)
55
55
56 Whether or not to walk the whole repo ourselves when our cached state has been
56 Whether or not to walk the whole repo ourselves when our cached state has been
57 invalidated, for example when Watchman has been restarted or .hgignore rules
57 invalidated, for example when Watchman has been restarted or .hgignore rules
58 have been changed. Walking the repo in that case can result in competing for
58 have been changed. Walking the repo in that case can result in competing for
59 I/O with Watchman. For large repos it is recommended to set this value to
59 I/O with Watchman. For large repos it is recommended to set this value to
60 false. You may wish to set this to true if you have a very fast filesystem
60 false. You may wish to set this to true if you have a very fast filesystem
61 that can outpace the IPC overhead of getting the result data for the full repo
61 that can outpace the IPC overhead of getting the result data for the full repo
62 from Watchman. Defaults to false.
62 from Watchman. Defaults to false.
63
63
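For example, on a very fast local filesystem where the IPC overhead is likely
to dominate, one might opt in to walking::

    [fsmonitor]
    walk_on_invalidate = true
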
64 ::
64 ::
65
65
66 [fsmonitor]
66 [fsmonitor]
67 warn_when_unused = (boolean)
67 warn_when_unused = (boolean)
68
68
69 Whether to print a warning during certain operations when fsmonitor would be
69 Whether to print a warning during certain operations when fsmonitor would be
70 beneficial to performance but isn't enabled.
70 beneficial to performance but isn't enabled.
71
71
72 ::
72 ::
73
73
74 [fsmonitor]
74 [fsmonitor]
75 warn_update_file_count = (integer)
75 warn_update_file_count = (integer)
76
76
77 If ``warn_when_unused`` is set and fsmonitor isn't enabled, a warning will
77 If ``warn_when_unused`` is set and fsmonitor isn't enabled, a warning will
78 be printed during working directory updates if this many files will be
78 be printed during working directory updates if this many files will be
79 created.
79 created.
80 '''
80 '''
81
81
82 # Platforms Supported
82 # Platforms Supported
83 # ===================
83 # ===================
84 #
84 #
85 # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably,
85 # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably,
86 # even under severe loads.
86 # even under severe loads.
87 #
87 #
88 # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor
88 # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor
89 # turned on, on case-insensitive HFS+. There has been a reasonable amount of
89 # turned on, on case-insensitive HFS+. There has been a reasonable amount of
90 # user testing under normal loads.
90 # user testing under normal loads.
91 #
91 #
92 # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but
92 # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but
93 # very little testing has been done.
93 # very little testing has been done.
94 #
94 #
95 # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet.
95 # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet.
96 #
96 #
97 # Known Issues
97 # Known Issues
98 # ============
98 # ============
99 #
99 #
100 # * fsmonitor will disable itself if any of the following extensions are
100 # * fsmonitor will disable itself if any of the following extensions are
101 # enabled: largefiles, inotify, eol; or if the repository has subrepos.
101 # enabled: largefiles, inotify, eol; or if the repository has subrepos.
102 # * fsmonitor will produce incorrect results if nested repos that are not
102 # * fsmonitor will produce incorrect results if nested repos that are not
103 # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`.
103 # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`.
104 #
104 #
105 # The issues related to nested repos and subrepos are probably not fundamental
105 # The issues related to nested repos and subrepos are probably not fundamental
106 # ones. Patches to fix them are welcome.
106 # ones. Patches to fix them are welcome.
107
107
108 from __future__ import absolute_import
108 from __future__ import absolute_import
109
109
110 import codecs
110 import codecs
111 import hashlib
111 import hashlib
112 import os
112 import os
113 import stat
113 import stat
114 import sys
114 import sys
115 import tempfile
115 import tempfile
116 import weakref
116 import weakref
117
117
118 from mercurial.i18n import _
118 from mercurial.i18n import _
119 from mercurial.node import (
119 from mercurial.node import (
120 hex,
120 hex,
121 )
121 )
122
122
123 from mercurial import (
123 from mercurial import (
124 context,
124 context,
125 encoding,
125 encoding,
126 error,
126 error,
127 extensions,
127 extensions,
128 localrepo,
128 localrepo,
129 merge,
129 merge,
130 pathutil,
130 pathutil,
131 pycompat,
131 pycompat,
132 registrar,
132 registrar,
133 scmutil,
133 scmutil,
134 util,
134 util,
135 )
135 )
136 from mercurial import match as matchmod
136 from mercurial import match as matchmod
137
137
138 from . import (
138 from . import (
139 pywatchman,
139 pywatchman,
140 state,
140 state,
141 watchmanclient,
141 watchmanclient,
142 )
142 )
143
143
144 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
144 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
145 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
145 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
146 # be specifying the version(s) of Mercurial they are tested with, or
146 # be specifying the version(s) of Mercurial they are tested with, or
147 # leave the attribute unspecified.
147 # leave the attribute unspecified.
148 testedwith = 'ships-with-hg-core'
148 testedwith = 'ships-with-hg-core'
149
149
150 configtable = {}
150 configtable = {}
151 configitem = registrar.configitem(configtable)
151 configitem = registrar.configitem(configtable)
152
152
153 configitem('fsmonitor', 'mode',
153 configitem('fsmonitor', 'mode',
154 default='on',
154 default='on',
155 )
155 )
156 configitem('fsmonitor', 'walk_on_invalidate',
156 configitem('fsmonitor', 'walk_on_invalidate',
157 default=False,
157 default=False,
158 )
158 )
159 configitem('fsmonitor', 'timeout',
159 configitem('fsmonitor', 'timeout',
160 default='2',
160 default='2',
161 )
161 )
162 configitem('fsmonitor', 'blacklistusers',
162 configitem('fsmonitor', 'blacklistusers',
163 default=list,
163 default=list,
164 )
164 )
165 configitem('fsmonitor', 'watchman_exe',
165 configitem('fsmonitor', 'watchman_exe',
166 default='watchman',
166 default='watchman',
167 )
167 )
168 configitem('fsmonitor', 'verbose',
168 configitem('fsmonitor', 'verbose',
169 default=True,
169 default=True,
170 experimental=True,
170 )
171 )
171 configitem('experimental', 'fsmonitor.transaction_notify',
172 configitem('experimental', 'fsmonitor.transaction_notify',
172 default=False,
173 default=False,
173 )
174 )
174
175
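# A sketch of the registrar argument introduced by this change: passing
# ``experimental=True`` (as for fsmonitor.verbose above) marks an option as
# experimental at registration time. The option name below is hypothetical,
# for illustration only:
#
#   configitem('fsmonitor', 'some_future_option',
#       default=False,
#       experimental=True,
#   )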
175 # This extension is incompatible with the following blacklisted extensions
176 # This extension is incompatible with the following blacklisted extensions
176 # and will disable itself when encountering one of these:
177 # and will disable itself when encountering one of these:
177 _blacklist = ['largefiles', 'eol']
178 _blacklist = ['largefiles', 'eol']
178
179
179 def debuginstall(ui, fm):
180 def debuginstall(ui, fm):
180 fm.write("fsmonitor-watchman",
181 fm.write("fsmonitor-watchman",
181 _("fsmonitor checking for watchman binary... (%s)\n"),
182 _("fsmonitor checking for watchman binary... (%s)\n"),
182 ui.configpath("fsmonitor", "watchman_exe"))
183 ui.configpath("fsmonitor", "watchman_exe"))
183 root = tempfile.mkdtemp()
184 root = tempfile.mkdtemp()
184 c = watchmanclient.client(ui, root)
185 c = watchmanclient.client(ui, root)
185 err = None
186 err = None
186 try:
187 try:
187 v = c.command("version")
188 v = c.command("version")
188 fm.write("fsmonitor-watchman-version",
189 fm.write("fsmonitor-watchman-version",
189 _(" watchman binary version %s\n"), v["version"])
190 _(" watchman binary version %s\n"), v["version"])
190 except watchmanclient.Unavailable as e:
191 except watchmanclient.Unavailable as e:
191 err = str(e)
192 err = str(e)
192 fm.condwrite(err, "fsmonitor-watchman-error",
193 fm.condwrite(err, "fsmonitor-watchman-error",
193 _(" watchman binary missing or broken: %s\n"), err)
194 _(" watchman binary missing or broken: %s\n"), err)
194 return 1 if err else 0
195 return 1 if err else 0
195
196
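# When run via ``hg debuginstall``, the check above produces output along
# these lines (the version string is illustrative):
#
#   fsmonitor checking for watchman binary... (watchman)
#    watchman binary version 4.9.0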
196 def _handleunavailable(ui, state, ex):
197 def _handleunavailable(ui, state, ex):
197 """Exception handler for Watchman interaction exceptions"""
198 """Exception handler for Watchman interaction exceptions"""
198 if isinstance(ex, watchmanclient.Unavailable):
199 if isinstance(ex, watchmanclient.Unavailable):
199 # experimental config: fsmonitor.verbose
200 # experimental config: fsmonitor.verbose
200 if ex.warn and ui.configbool('fsmonitor', 'verbose'):
201 if ex.warn and ui.configbool('fsmonitor', 'verbose'):
201 if 'illegal_fstypes' not in str(ex):
202 if 'illegal_fstypes' not in str(ex):
202 ui.warn(str(ex) + '\n')
203 ui.warn(str(ex) + '\n')
203 if ex.invalidate:
204 if ex.invalidate:
204 state.invalidate()
205 state.invalidate()
205 # experimental config: fsmonitor.verbose
206 # experimental config: fsmonitor.verbose
206 if ui.configbool('fsmonitor', 'verbose'):
207 if ui.configbool('fsmonitor', 'verbose'):
207 ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg)
208 ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg)
208 else:
209 else:
209 ui.log('fsmonitor', 'Watchman exception: %s\n', ex)
210 ui.log('fsmonitor', 'Watchman exception: %s\n', ex)
210
211
211 def _hashignore(ignore):
212 def _hashignore(ignore):
212 """Calculate hash for ignore patterns and filenames
213 """Calculate hash for ignore patterns and filenames
213
214
214 If this information changes between Mercurial invocations, we can't
215 If this information changes between Mercurial invocations, we can't
215 rely on Watchman information anymore and have to re-scan the working
216 rely on Watchman information anymore and have to re-scan the working
216 copy.
217 copy.
217
218
218 """
219 """
219 sha1 = hashlib.sha1()
220 sha1 = hashlib.sha1()
220 sha1.update(repr(ignore))
221 sha1.update(repr(ignore))
221 return sha1.hexdigest()
222 return sha1.hexdigest()
222
223
223 _watchmanencoding = pywatchman.encoding.get_local_encoding()
224 _watchmanencoding = pywatchman.encoding.get_local_encoding()
224 _fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
225 _fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
225 _fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding)
226 _fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding)
226
227
227 def _watchmantofsencoding(path):
228 def _watchmantofsencoding(path):
228 """Fix path to match watchman and local filesystem encoding
229 """Fix path to match watchman and local filesystem encoding
229
230
230 Watchman's path encoding can differ from the filesystem encoding. For example,
231 Watchman's path encoding can differ from the filesystem encoding. For example,
231 on Windows it is always UTF-8.
232 on Windows it is always UTF-8.
232 """
233 """
233 try:
234 try:
234 decoded = path.decode(_watchmanencoding)
235 decoded = path.decode(_watchmanencoding)
235 except UnicodeDecodeError as e:
236 except UnicodeDecodeError as e:
236 raise error.Abort(str(e), hint='watchman encoding error')
237 raise error.Abort(str(e), hint='watchman encoding error')
237
238
238 try:
239 try:
239 encoded = decoded.encode(_fsencoding, 'strict')
240 encoded = decoded.encode(_fsencoding, 'strict')
240 except UnicodeEncodeError as e:
241 except UnicodeEncodeError as e:
241 raise error.Abort(str(e))
242 raise error.Abort(str(e))
242
243
243 return encoded
244 return encoded
244
245
245 def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
246 def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
246 '''Replacement for dirstate.walk, hooking into Watchman.
247 '''Replacement for dirstate.walk, hooking into Watchman.
247
248
248 Whenever full is False, ignored is False, and the Watchman client is
249 Whenever full is False, ignored is False, and the Watchman client is
249 available, use Watchman combined with saved state to possibly return only a
250 available, use Watchman combined with saved state to possibly return only a
250 subset of files.'''
251 subset of files.'''
251 def bail(reason):
252 def bail(reason):
252 self._ui.debug('fsmonitor: fallback to core status, %s\n' % reason)
253 self._ui.debug('fsmonitor: fallback to core status, %s\n' % reason)
253 return orig(match, subrepos, unknown, ignored, full=True)
254 return orig(match, subrepos, unknown, ignored, full=True)
254
255
255 if full:
256 if full:
256 return bail('full rewalk requested')
257 return bail('full rewalk requested')
257 if ignored:
258 if ignored:
258 return bail('listing ignored files')
259 return bail('listing ignored files')
259 if not self._watchmanclient.available():
260 if not self._watchmanclient.available():
260 return bail('client unavailable')
261 return bail('client unavailable')
261 state = self._fsmonitorstate
262 state = self._fsmonitorstate
262 clock, ignorehash, notefiles = state.get()
263 clock, ignorehash, notefiles = state.get()
263 if not clock:
264 if not clock:
264 if state.walk_on_invalidate:
265 if state.walk_on_invalidate:
265 return bail('no clock')
266 return bail('no clock')
266 # Initial NULL clock value, see
267 # Initial NULL clock value, see
267 # https://facebook.github.io/watchman/docs/clockspec.html
268 # https://facebook.github.io/watchman/docs/clockspec.html
268 clock = 'c:0:0'
269 clock = 'c:0:0'
269 notefiles = []
270 notefiles = []
270
271
271 ignore = self._ignore
272 ignore = self._ignore
272 dirignore = self._dirignore
273 dirignore = self._dirignore
273 if unknown:
274 if unknown:
274 if _hashignore(ignore) != ignorehash and clock != 'c:0:0':
275 if _hashignore(ignore) != ignorehash and clock != 'c:0:0':
275 # ignore list changed -- can't rely on Watchman state any more
276 # ignore list changed -- can't rely on Watchman state any more
276 if state.walk_on_invalidate:
277 if state.walk_on_invalidate:
277 return bail('ignore rules changed')
278 return bail('ignore rules changed')
278 notefiles = []
279 notefiles = []
279 clock = 'c:0:0'
280 clock = 'c:0:0'
280 else:
281 else:
281 # always ignore
282 # always ignore
282 ignore = util.always
283 ignore = util.always
283 dirignore = util.always
284 dirignore = util.always
284
285
285 matchfn = match.matchfn
286 matchfn = match.matchfn
286 matchalways = match.always()
287 matchalways = match.always()
287 dmap = self._map
288 dmap = self._map
288 if util.safehasattr(dmap, '_map'):
289 if util.safehasattr(dmap, '_map'):
289 # for better performance, directly access the inner dirstate map if the
290 # for better performance, directly access the inner dirstate map if the
290 # standard dirstate implementation is in use.
291 # standard dirstate implementation is in use.
291 dmap = dmap._map
292 dmap = dmap._map
292 nonnormalset = self._map.nonnormalset
293 nonnormalset = self._map.nonnormalset
293
294
294 copymap = self._map.copymap
295 copymap = self._map.copymap
295 getkind = stat.S_IFMT
296 getkind = stat.S_IFMT
296 dirkind = stat.S_IFDIR
297 dirkind = stat.S_IFDIR
297 regkind = stat.S_IFREG
298 regkind = stat.S_IFREG
298 lnkkind = stat.S_IFLNK
299 lnkkind = stat.S_IFLNK
299 join = self._join
300 join = self._join
300 normcase = util.normcase
301 normcase = util.normcase
301 fresh_instance = False
302 fresh_instance = False
302
303
303 exact = skipstep3 = False
304 exact = skipstep3 = False
304 if match.isexact(): # match.exact
305 if match.isexact(): # match.exact
305 exact = True
306 exact = True
306 dirignore = util.always # skip step 2
307 dirignore = util.always # skip step 2
307 elif match.prefix(): # match.match, no patterns
308 elif match.prefix(): # match.match, no patterns
308 skipstep3 = True
309 skipstep3 = True
309
310
310 if not exact and self._checkcase:
311 if not exact and self._checkcase:
311 # note that even though we could receive directory entries, we're only
312 # note that even though we could receive directory entries, we're only
312 # interested in checking if a file with the same name exists. So only
313 # interested in checking if a file with the same name exists. So only
313 # normalize files if possible.
314 # normalize files if possible.
314 normalize = self._normalizefile
315 normalize = self._normalizefile
315 skipstep3 = False
316 skipstep3 = False
316 else:
317 else:
317 normalize = None
318 normalize = None
318
319
319 # step 1: find all explicit files
320 # step 1: find all explicit files
320 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
321 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
321
322
322 skipstep3 = skipstep3 and not (work or dirsnotfound)
323 skipstep3 = skipstep3 and not (work or dirsnotfound)
323 work = [d for d in work if not dirignore(d[0])]
324 work = [d for d in work if not dirignore(d[0])]
324
325
325 if not work and (exact or skipstep3):
326 if not work and (exact or skipstep3):
326 for s in subrepos:
327 for s in subrepos:
327 del results[s]
328 del results[s]
328 del results['.hg']
329 del results['.hg']
329 return results
330 return results
330
331
331 # step 2: query Watchman
332 # step 2: query Watchman
332 try:
333 try:
333 # Use the user-configured timeout for the query.
334 # Use the user-configured timeout for the query.
334 # Add a little slack over the top of the user query to allow for
335 # Add a little slack over the top of the user query to allow for
335 # overheads while transferring the data
336 # overheads while transferring the data
336 self._watchmanclient.settimeout(state.timeout + 0.1)
337 self._watchmanclient.settimeout(state.timeout + 0.1)
337 result = self._watchmanclient.command('query', {
338 result = self._watchmanclient.command('query', {
338 'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
339 'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
339 'since': clock,
340 'since': clock,
340 'expression': [
341 'expression': [
341 'not', [
342 'not', [
342 'anyof', ['dirname', '.hg'],
343 'anyof', ['dirname', '.hg'],
343 ['name', '.hg', 'wholename']
344 ['name', '.hg', 'wholename']
344 ]
345 ]
345 ],
346 ],
346 'sync_timeout': int(state.timeout * 1000),
347 'sync_timeout': int(state.timeout * 1000),
347 'empty_on_fresh_instance': state.walk_on_invalidate,
348 'empty_on_fresh_instance': state.walk_on_invalidate,
348 })
349 })
349 except Exception as ex:
350 except Exception as ex:
350 _handleunavailable(self._ui, state, ex)
351 _handleunavailable(self._ui, state, ex)
351 self._watchmanclient.clearconnection()
352 self._watchmanclient.clearconnection()
352 return bail('exception during run')
353 return bail('exception during run')
353 else:
354 else:
354 # We need to propagate the last observed clock up so that we
355 # We need to propagate the last observed clock up so that we
355 # can use it for our next query
356 # can use it for our next query
356 state.setlastclock(result['clock'])
357 state.setlastclock(result['clock'])
357 if result['is_fresh_instance']:
358 if result['is_fresh_instance']:
358 if state.walk_on_invalidate:
359 if state.walk_on_invalidate:
359 state.invalidate()
360 state.invalidate()
360 return bail('fresh instance')
361 return bail('fresh instance')
361 fresh_instance = True
362 fresh_instance = True
362 # Ignore any prior notable files from the state info
363 # Ignore any prior notable files from the state info
363 notefiles = []
364 notefiles = []
364
365
365 # for file paths that require normalization, we may encounter case
366 # for file paths that require normalization, we may encounter case
366 # collisions, so we keep our own foldmap
367 # collisions, so we keep our own foldmap
367 if normalize:
368 if normalize:
368 foldmap = dict((normcase(k), k) for k in results)
369 foldmap = dict((normcase(k), k) for k in results)
369
370
370 switch_slashes = pycompat.ossep == '\\'
371 switch_slashes = pycompat.ossep == '\\'
371 # The order of the results is, strictly speaking, undefined.
372 # The order of the results is, strictly speaking, undefined.
372 # For case changes on a case insensitive filesystem we may receive
373 # For case changes on a case insensitive filesystem we may receive
373 # two entries, one with exists=True and another with exists=False.
374 # two entries, one with exists=True and another with exists=False.
374 # The exists=True entries in the same response should be interpreted
375 # The exists=True entries in the same response should be interpreted
375 # as being happens-after the exists=False entries due to the way that
376 # as being happens-after the exists=False entries due to the way that
376 # Watchman tracks files. We use this property to reconcile deletes
377 # Watchman tracks files. We use this property to reconcile deletes
377 # for name case changes.
378 # for name case changes.
378 for entry in result['files']:
379 for entry in result['files']:
379 fname = entry['name']
380 fname = entry['name']
380 if _fixencoding:
381 if _fixencoding:
381 fname = _watchmantofsencoding(fname)
382 fname = _watchmantofsencoding(fname)
382 if switch_slashes:
383 if switch_slashes:
383 fname = fname.replace('\\', '/')
384 fname = fname.replace('\\', '/')
384 if normalize:
385 if normalize:
385 normed = normcase(fname)
386 normed = normcase(fname)
386 fname = normalize(fname, True, True)
387 fname = normalize(fname, True, True)
387 foldmap[normed] = fname
388 foldmap[normed] = fname
388 fmode = entry['mode']
389 fmode = entry['mode']
389 fexists = entry['exists']
390 fexists = entry['exists']
390 kind = getkind(fmode)
391 kind = getkind(fmode)
391
392
392 if '/.hg/' in fname or fname.endswith('/.hg'):
393 if '/.hg/' in fname or fname.endswith('/.hg'):
393 return bail('nested-repo-detected')
394 return bail('nested-repo-detected')
394
395
395 if not fexists:
396 if not fexists:
396 # if marked as deleted and we don't already have a change
397 # if marked as deleted and we don't already have a change
397 # record, mark it as deleted. If we already have an entry
398 # record, mark it as deleted. If we already have an entry
398 # for fname then it was either part of walkexplicit or was
399 # for fname then it was either part of walkexplicit or was
399 # an earlier result that was a case change
400 # an earlier result that was a case change
400 if fname not in results and fname in dmap and (
401 if fname not in results and fname in dmap and (
401 matchalways or matchfn(fname)):
402 matchalways or matchfn(fname)):
402 results[fname] = None
403 results[fname] = None
403 elif kind == dirkind:
404 elif kind == dirkind:
404 if fname in dmap and (matchalways or matchfn(fname)):
405 if fname in dmap and (matchalways or matchfn(fname)):
405 results[fname] = None
406 results[fname] = None
406 elif kind == regkind or kind == lnkkind:
407 elif kind == regkind or kind == lnkkind:
407 if fname in dmap:
408 if fname in dmap:
408 if matchalways or matchfn(fname):
409 if matchalways or matchfn(fname):
409 results[fname] = entry
410 results[fname] = entry
410 elif (matchalways or matchfn(fname)) and not ignore(fname):
411 elif (matchalways or matchfn(fname)) and not ignore(fname):
411 results[fname] = entry
412 results[fname] = entry
412 elif fname in dmap and (matchalways or matchfn(fname)):
413 elif fname in dmap and (matchalways or matchfn(fname)):
413 results[fname] = None
414 results[fname] = None
414
415
415 # step 3: query notable files we don't already know about
416 # step 3: query notable files we don't already know about
416 # XXX try not to iterate over the entire dmap
417 # XXX try not to iterate over the entire dmap
417 if normalize:
418 if normalize:
418 # any notable files that have changed case will already be handled
419 # any notable files that have changed case will already be handled
419 # above, so just check membership in the foldmap
420 # above, so just check membership in the foldmap
420 notefiles = set((normalize(f, True, True) for f in notefiles
421 notefiles = set((normalize(f, True, True) for f in notefiles
421 if normcase(f) not in foldmap))
422 if normcase(f) not in foldmap))
422 visit = set((f for f in notefiles if (f not in results and matchfn(f)
423 visit = set((f for f in notefiles if (f not in results and matchfn(f)
423 and (f in dmap or not ignore(f)))))
424 and (f in dmap or not ignore(f)))))
424
425
425 if not fresh_instance:
426 if not fresh_instance:
426 if matchalways:
427 if matchalways:
427 visit.update(f for f in nonnormalset if f not in results)
428 visit.update(f for f in nonnormalset if f not in results)
428 visit.update(f for f in copymap if f not in results)
429 visit.update(f for f in copymap if f not in results)
429 else:
430 else:
430 visit.update(f for f in nonnormalset
431 visit.update(f for f in nonnormalset
431 if f not in results and matchfn(f))
432 if f not in results and matchfn(f))
432 visit.update(f for f in copymap
433 visit.update(f for f in copymap
433 if f not in results and matchfn(f))
434 if f not in results and matchfn(f))
434 else:
435 else:
435 if matchalways:
436 if matchalways:
436 visit.update(f for f, st in dmap.iteritems() if f not in results)
437 visit.update(f for f, st in dmap.iteritems() if f not in results)
437 visit.update(f for f in copymap if f not in results)
438 visit.update(f for f in copymap if f not in results)
438 else:
439 else:
439 visit.update(f for f, st in dmap.iteritems()
440 visit.update(f for f, st in dmap.iteritems()
440 if f not in results and matchfn(f))
441 if f not in results and matchfn(f))
441 visit.update(f for f in copymap
442 visit.update(f for f in copymap
442 if f not in results and matchfn(f))
443 if f not in results and matchfn(f))
443
444
444 audit = pathutil.pathauditor(self._root, cached=True).check
445 audit = pathutil.pathauditor(self._root, cached=True).check
445 auditpass = [f for f in visit if audit(f)]
446 auditpass = [f for f in visit if audit(f)]
446 auditpass.sort()
447 auditpass.sort()
447 auditfail = visit.difference(auditpass)
448 auditfail = visit.difference(auditpass)
448 for f in auditfail:
449 for f in auditfail:
449 results[f] = None
450 results[f] = None
450
451
451 nf = iter(auditpass).next
452 nf = iter(auditpass).next
452 for st in util.statfiles([join(f) for f in auditpass]):
453 for st in util.statfiles([join(f) for f in auditpass]):
453 f = nf()
454 f = nf()
454 if st or f in dmap:
455 if st or f in dmap:
455 results[f] = st
456 results[f] = st
456
457
457 for s in subrepos:
458 for s in subrepos:
458 del results[s]
459 del results[s]
459 del results['.hg']
460 del results['.hg']
460 return results
461 return results
461
462
462 def overridestatus(
463 def overridestatus(
463 orig, self, node1='.', node2=None, match=None, ignored=False,
464 orig, self, node1='.', node2=None, match=None, ignored=False,
464 clean=False, unknown=False, listsubrepos=False):
465 clean=False, unknown=False, listsubrepos=False):
465 listignored = ignored
466 listignored = ignored
466 listclean = clean
467 listclean = clean
467 listunknown = unknown
468 listunknown = unknown
468
469
469 def _cmpsets(l1, l2):
470 def _cmpsets(l1, l2):
470 try:
471 try:
471 if 'FSMONITOR_LOG_FILE' in encoding.environ:
472 if 'FSMONITOR_LOG_FILE' in encoding.environ:
472 fn = encoding.environ['FSMONITOR_LOG_FILE']
473 fn = encoding.environ['FSMONITOR_LOG_FILE']
473 f = open(fn, 'wb')
474 f = open(fn, 'wb')
474 else:
475 else:
                fn = 'fsmonitorfail.log'
                f = self.vfs.open(fn, 'wb')
        except (IOError, OSError):
            self.ui.warn(_('warning: unable to write to %s\n') % fn)
            return

        try:
            for i, (s1, s2) in enumerate(zip(l1, l2)):
                if set(s1) != set(s2):
                    f.write('sets at position %d are unequal\n' % i)
                    f.write('watchman returned: %s\n' % s1)
                    f.write('stat returned: %s\n' % s2)
        finally:
            f.close()

    if isinstance(node1, context.changectx):
        ctx1 = node1
    else:
        ctx1 = self[node1]
    if isinstance(node2, context.changectx):
        ctx2 = node2
    else:
        ctx2 = self[node2]

    working = ctx2.rev() is None
    parentworking = working and ctx1 == self['.']
    match = match or matchmod.always()

    # Maybe we can use this opportunity to update Watchman's state.
    # Mercurial uses workingcommitctx and/or memctx to represent the part of
    # the workingctx that is to be committed. So don't update the state in
    # that case.
    # HG_PENDING is set in the environment when the dirstate is being updated
    # in the middle of a transaction; we must not update our state in that
    # case, or we risk forgetting about changes in the working copy.
    updatestate = (parentworking and match.always() and
                   not isinstance(ctx2, (context.workingcommitctx,
                                         context.memctx)) and
                   'HG_PENDING' not in encoding.environ)

    try:
        if self._fsmonitorstate.walk_on_invalidate:
            # Use a short timeout to query the current clock. If that
            # takes too long then we assume that the service will be slow
            # to answer our query.
            # walk_on_invalidate indicates that we prefer to walk the
            # tree ourselves because we can ignore portions that Watchman
            # cannot and we tend to be faster in the warmer buffer cache
            # cases.
            self._watchmanclient.settimeout(0.1)
        else:
            # Give Watchman more time to potentially complete its walk
            # and return the initial clock. In this mode we assume that
            # the filesystem will be slower than parsing a potentially
            # very large Watchman result set.
            self._watchmanclient.settimeout(
                self._fsmonitorstate.timeout + 0.1)
        startclock = self._watchmanclient.getcurrentclock()
    except Exception as ex:
        self._watchmanclient.clearconnection()
        _handleunavailable(self.ui, self._fsmonitorstate, ex)
        # boo, Watchman failed. bail
        return orig(node1, node2, match, listignored, listclean,
                    listunknown, listsubrepos)

    if updatestate:
        # We need info about unknown files. This may make things slower the
        # first time, but whatever.
        stateunknown = True
    else:
        stateunknown = listunknown

    if updatestate:
        ps = poststatus(startclock)
        self.addpostdsstatus(ps)

    r = orig(node1, node2, match, listignored, listclean, stateunknown,
             listsubrepos)
    modified, added, removed, deleted, unknown, ignored, clean = r

    if not listunknown:
        unknown = []

    # don't do paranoid checks if we're not going to query Watchman anyway
    full = listclean or match.traversedir is not None
    if self._fsmonitorstate.mode == 'paranoid' and not full:
        # run status again and fall back to the old walk this time
        self.dirstate._fsmonitordisable = True

        # shut the UI up
        quiet = self.ui.quiet
        self.ui.quiet = True
        fout, ferr = self.ui.fout, self.ui.ferr
        self.ui.fout = self.ui.ferr = open(os.devnull, 'wb')

        try:
            rv2 = orig(
                node1, node2, match, listignored, listclean, listunknown,
                listsubrepos)
        finally:
            self.dirstate._fsmonitordisable = False
            self.ui.quiet = quiet
            self.ui.fout, self.ui.ferr = fout, ferr

        # clean isn't tested since it's set to True above
        with self.wlock():
            _cmpsets(
                [modified, added, removed, deleted, unknown, ignored, clean],
                rv2)
        modified, added, removed, deleted, unknown, ignored, clean = rv2

    return scmutil.status(
        modified, added, removed, deleted, unknown, ignored, clean)

class poststatus(object):
    def __init__(self, startclock):
        self._startclock = startclock

    def __call__(self, wctx, status):
        clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock
        hashignore = _hashignore(wctx.repo().dirstate._ignore)
        notefiles = (status.modified + status.added + status.removed +
                     status.deleted + status.unknown)
        wctx.repo()._fsmonitorstate.set(clock, hashignore, notefiles)

def makedirstate(repo, dirstate):
    class fsmonitordirstate(dirstate.__class__):
        def _fsmonitorinit(self, repo):
            # _fsmonitordisable is used in paranoid mode
            self._fsmonitordisable = False
            self._fsmonitorstate = repo._fsmonitorstate
            self._watchmanclient = repo._watchmanclient
            self._repo = weakref.proxy(repo)

        def walk(self, *args, **kwargs):
            orig = super(fsmonitordirstate, self).walk
            if self._fsmonitordisable:
                return orig(*args, **kwargs)
            return overridewalk(orig, self, *args, **kwargs)

        def rebuild(self, *args, **kwargs):
            self._fsmonitorstate.invalidate()
            return super(fsmonitordirstate, self).rebuild(*args, **kwargs)

        def invalidate(self, *args, **kwargs):
            self._fsmonitorstate.invalidate()
            return super(fsmonitordirstate, self).invalidate(*args, **kwargs)

    dirstate.__class__ = fsmonitordirstate
    dirstate._fsmonitorinit(repo)

def wrapdirstate(orig, self):
    ds = orig(self)
    # only override the dirstate when Watchman is available for the repo
    if util.safehasattr(self, '_fsmonitorstate'):
        makedirstate(self, ds)
    return ds

def extsetup(ui):
    extensions.wrapfilecache(
        localrepo.localrepository, 'dirstate', wrapdirstate)
    if pycompat.isdarwin:
        # An assist for avoiding the dangling-symlink fsevents bug
        extensions.wrapfunction(os, 'symlink', wrapsymlink)

    extensions.wrapfunction(merge, 'update', wrapupdate)

def wrapsymlink(orig, source, link_name):
    '''if we create a dangling symlink, also touch the parent dir
    to encourage fsevents notifications to work more correctly'''
    try:
        return orig(source, link_name)
    finally:
        try:
            os.utime(os.path.dirname(link_name), None)
        except OSError:
            pass

class state_update(object):
    '''This context manager is responsible for dispatching the state-enter
    and state-leave signals to the watchman service. The enter and leave
    methods can be invoked manually (for scenarios where context manager
    semantics are not possible). If parameters oldnode and newnode are None,
    they will be populated based on the current working copy in enter and
    leave, respectively. Similarly, if distance is None, it will be
    calculated based on the oldnode and newnode in the leave method.'''

    def __init__(self, repo, name, oldnode=None, newnode=None, distance=None,
                 partial=False):
        self.repo = repo.unfiltered()
        self.name = name
        self.oldnode = oldnode
        self.newnode = newnode
        self.distance = distance
        self.partial = partial
        self._lock = None
        self.need_leave = False

    def __enter__(self):
        self.enter()

    def enter(self):
        # Make sure we have a wlock prior to sending notifications to watchman.
        # We don't want to race with other actors. In the update case,
        # merge.update is going to take the wlock almost immediately. We are
        # effectively extending the lock around several short sanity checks.
        if self.oldnode is None:
            self.oldnode = self.repo['.'].node()

        if self.repo.currentwlock() is None:
            if util.safehasattr(self.repo, 'wlocknostateupdate'):
                self._lock = self.repo.wlocknostateupdate()
            else:
                self._lock = self.repo.wlock()
        self.need_leave = self._state(
            'state-enter',
            hex(self.oldnode))
        return self

    def __exit__(self, type_, value, tb):
        abort = True if type_ else False
        self.exit(abort=abort)

    def exit(self, abort=False):
        try:
            if self.need_leave:
                status = 'failed' if abort else 'ok'
                if self.newnode is None:
                    self.newnode = self.repo['.'].node()
                if self.distance is None:
                    self.distance = calcdistance(
                        self.repo, self.oldnode, self.newnode)
                self._state(
                    'state-leave',
                    hex(self.newnode),
                    status=status)
        finally:
            self.need_leave = False
            if self._lock:
                self._lock.release()

    def _state(self, cmd, commithash, status='ok'):
        if not util.safehasattr(self.repo, '_watchmanclient'):
            return False
        try:
            self.repo._watchmanclient.command(cmd, {
                'name': self.name,
                'metadata': {
                    # the target revision
                    'rev': commithash,
                    # approximate number of commits between current and target
                    'distance': self.distance if self.distance else 0,
                    # success/failure (only really meaningful for state-leave)
                    'status': status,
                    # whether the working copy parent is changing
                    'partial': self.partial,
                }})
            return True
        except Exception as e:
            # Swallow any errors; fire and forget
            self.repo.ui.log(
                'watchman', 'Exception %s while running %s\n', e, cmd)
            return False

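# Illustrative usage sketch (not part of the original file): how a caller is
# expected to drive the state_update context manager above. The repo object
# and the bulk-edit helper passed in are hypothetical stand-ins; both forms
# follow the contract described in the class docstring.
def _example_state_update_usage(repo, do_bulk_file_changes):
    # context-manager form: watchman subscribers observe state-enter on
    # entry and state-leave (status 'ok' or 'failed') on exit
    with state_update(repo, name='example.bulk-edit'):
        do_bulk_file_changes()

    # manual form, for call sites where `with` semantics are not possible
    su = state_update(repo, name='example.bulk-edit')
    su.enter()
    try:
        do_bulk_file_changes()
    finally:
        su.exit(abort=False)
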
# Estimate the distance between two nodes
def calcdistance(repo, oldnode, newnode):
    anc = repo.changelog.ancestor(oldnode, newnode)
    ancrev = repo[anc].rev()
    distance = (abs(repo[oldnode].rev() - ancrev)
                + abs(repo[newnode].rev() - ancrev))
    return distance

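# Worked example (illustrative): with oldnode at rev 12, newnode at rev 40,
# and their common ancestor at rev 10, calcdistance returns
# abs(12 - 10) + abs(40 - 10) = 2 + 30 = 32 -- the number of commits walked
# down to the ancestor and back up to the target, which is why the result is
# only an estimate of the separation between the two nodes.
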
# Bracket working copy updates with calls to the watchman state-enter
# and state-leave commands. This allows clients to perform more intelligent
# settling during bulk file change scenarios
# https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
def wrapupdate(orig, repo, node, branchmerge, force, ancestor=None,
               mergeancestor=False, labels=None, matcher=None, **kwargs):

    distance = 0
    partial = True
    oldnode = repo['.'].node()
    newnode = repo[node].node()
    if matcher is None or matcher.always():
        partial = False
        distance = calcdistance(repo.unfiltered(), oldnode, newnode)

    with state_update(repo, name="hg.update", oldnode=oldnode, newnode=newnode,
                      distance=distance, partial=partial):
        return orig(
            repo, node, branchmerge, force, ancestor, mergeancestor,
            labels, matcher, **kwargs)

def repo_has_depth_one_nested_repo(repo):
    for f in repo.wvfs.listdir():
        if os.path.isdir(os.path.join(repo.root, f, '.hg')):
            msg = 'fsmonitor: sub-repository %r detected, fsmonitor disabled\n'
            repo.ui.debug(msg % f)
            return True
    return False

def reposetup(ui, repo):
    # We don't work with largefiles or inotify
    exts = extensions.enabled()
    for ext in _blacklist:
        if ext in exts:
            ui.warn(_('The fsmonitor extension is incompatible with the %s '
                      'extension and has been disabled.\n') % ext)
            return

    if repo.local():
        # We don't work with subrepos either.
        #
        # Checking repo[None].substate can cause a dirstate parse, which is
        # too slow. Instead, look for the .hgsubstate or .hgsub files that a
        # repository with subrepos would contain.
        if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'):
            return

        if repo_has_depth_one_nested_repo(repo):
            return

        fsmonitorstate = state.state(repo)
        if fsmonitorstate.mode == 'off':
            return

        try:
            client = watchmanclient.client(repo.ui, repo._root)
        except Exception as ex:
            _handleunavailable(ui, fsmonitorstate, ex)
            return

        repo._fsmonitorstate = fsmonitorstate
        repo._watchmanclient = client

        dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
        if cached:
            # at this point since fsmonitorstate wasn't present,
            # repo.dirstate is not a fsmonitordirstate
            makedirstate(repo, dirstate)

        class fsmonitorrepo(repo.__class__):
            def status(self, *args, **kwargs):
                orig = super(fsmonitorrepo, self).status
                return overridestatus(orig, self, *args, **kwargs)

            def wlocknostateupdate(self, *args, **kwargs):
                return super(fsmonitorrepo, self).wlock(*args, **kwargs)

            def wlock(self, *args, **kwargs):
                l = super(fsmonitorrepo, self).wlock(*args, **kwargs)
                if not ui.configbool(
                    "experimental", "fsmonitor.transaction_notify"):
                    return l
                if l.held != 1:
                    return l
                origrelease = l.releasefn

                def staterelease():
                    if origrelease:
                        origrelease()
                    if l.stateupdate:
                        l.stateupdate.exit()
                        l.stateupdate = None

                try:
                    l.stateupdate = None
                    l.stateupdate = state_update(self, name="hg.transaction")
                    l.stateupdate.enter()
                    l.releasefn = staterelease
                except Exception as e:
                    # Swallow any errors; fire and forget
                    self.ui.log(
                        'watchman', 'Exception in state update %s\n', e)
                return l

        repo.__class__ = fsmonitorrepo
@@ -1,1113 +1,1113 b''
# __init__.py - remotefilelog extension
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""remotefilelog causes Mercurial to lazily fetch file contents (EXPERIMENTAL)

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

remotefilelog works by fetching file contents lazily and storing them
in a cache on the client rather than in revlogs. This allows enormous
histories to be transferred only partially, making them easier to
operate on.

Configs:

``packs.maxchainlen`` specifies the maximum delta chain length in pack files

``packs.maxpacksize`` specifies the maximum pack file size

``packs.maxpackfilecount`` specifies the maximum number of packs in the
shared cache (trees only for now)

``remotefilelog.backgroundprefetch`` runs prefetch in background when True

``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
update, and on other commands that use them. Different from pullprefetch.

``remotefilelog.gcrepack`` does garbage collection during repack when True

``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
it is garbage collected

``remotefilelog.repackonhggc`` runs repack on hg gc when True

``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
days after which it is no longer prefetched.

``remotefilelog.prefetchdelay`` specifies delay between background
prefetches in seconds after operations that change the working copy parent

``remotefilelog.data.gencountlimit`` constrains the minimum number of data
pack files required to be considered part of a generation. In particular,
a generation must contain more than gencountlimit pack files.

``remotefilelog.data.generations`` list for specifying the lower bound of
each generation of the data pack files. For example, the list
['100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations:
[0, 1MB), [1MB, 100MB) and [100MB, infinity).

``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
include in an incremental data repack.

``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
it to be considered for an incremental data repack.

``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
to include in an incremental data repack.

``remotefilelog.history.gencountlimit`` constrains the minimum number of
history pack files required to be considered part of a generation. In
particular, a generation must contain more than gencountlimit pack files.

``remotefilelog.history.generations`` list for specifying the lower bound of
each generation of the history pack files. For example, the list
['100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations:
[0, 1MB), [1MB, 100MB) and [100MB, infinity).

``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
include in an incremental history repack.

``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
for it to be considered for an incremental history repack.

``remotefilelog.history.repacksizelimit`` the maximum total size of pack
files to include in an incremental history repack.

``remotefilelog.backgroundrepack`` automatically consolidate packs in the
background

``remotefilelog.cachepath`` path to cache

``remotefilelog.cachegroup`` if set, make cache directory sgid to this
group

``remotefilelog.cacheprocess`` binary to invoke for fetching file data

``remotefilelog.debug`` turn on remotefilelog-specific debug output

``remotefilelog.excludepattern`` pattern of files to exclude from pulls

``remotefilelog.includepattern`` pattern of files to include in pulls

``remotefilelog.fetchwarning`` message to print when too many
single-file fetches occur

``remotefilelog.getfilesstep`` number of files to request in a single RPC

``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
files, otherwise use optimistic fetching

``remotefilelog.pullprefetch`` revset for selecting files that should be
eagerly downloaded rather than lazily

``remotefilelog.reponame`` name of the repo. If set, used to partition
data from other repos in a shared store.

``remotefilelog.server`` if true, enable server-side functionality

``remotefilelog.servercachepath`` path for caching blobs on the server

``remotefilelog.serverexpiration`` number of days to keep cached server
blobs

``remotefilelog.validatecache`` if set, check cache entries for corruption
before returning blobs

``remotefilelog.validatecachelog`` if set, check cache entries for
corruption before returning metadata

An illustrative client configuration combining several of these options is
sketched in the comment block after this docstring.
"""
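# Illustrative client configuration (all values below are made-up examples,
# not shipped defaults) combining a few of the options documented in the
# docstring above; a client would put something like this in its hgrc:
#
#   [extensions]
#   remotefilelog =
#
#   [remotefilelog]
#   reponame = myrepo              # partition the shared cache per repo
#   cachepath = /var/cache/hgcache # where lazily fetched blobs are stored
#   pullprefetch = bookmark() + .  # revset to download eagerly on pull
#   backgroundrepack = True        # consolidate packs off the critical path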
from __future__ import absolute_import

import os
import time
import traceback

from mercurial.node import hex
from mercurial.i18n import _
from mercurial import (
    changegroup,
    changelog,
    cmdutil,
    commands,
    configitems,
    context,
    copies,
    debugcommands as hgdebugcommands,
    dispatch,
    error,
    exchange,
    extensions,
    hg,
    localrepo,
    match,
    merge,
    node as nodemod,
    patch,
    pycompat,
    registrar,
    repair,
    repoview,
    revset,
    scmutil,
    smartset,
    streamclone,
    util,
)
from . import (
    constants,
    debugcommands,
    fileserverclient,
    remotefilectx,
    remotefilelog,
    remotefilelogserver,
    repack as repackmod,
    shallowbundle,
    shallowrepo,
    shallowstore,
    shallowutil,
    shallowverifier,
)

# ensures debug commands are registered
hgdebugcommands.command

cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)

configitem('remotefilelog', 'debug', default=False)

configitem('remotefilelog', 'reponame', default='')
configitem('remotefilelog', 'cachepath', default=None)
configitem('remotefilelog', 'cachegroup', default=None)
configitem('remotefilelog', 'cacheprocess', default=None)
configitem('remotefilelog', 'cacheprocess.includepath', default=None)
configitem("remotefilelog", "cachelimit", default="1000 GB")

configitem('remotefilelog', 'fallbackpath', default=configitems.dynamicdefault,
           alias=[('remotefilelog', 'fallbackrepo')])

configitem('remotefilelog', 'validatecachelog', default=None)
configitem('remotefilelog', 'validatecache', default='on')
configitem('remotefilelog', 'server', default=None)
configitem('remotefilelog', 'servercachepath', default=None)
configitem("remotefilelog", "serverexpiration", default=30)
configitem('remotefilelog', 'backgroundrepack', default=False)
configitem('remotefilelog', 'bgprefetchrevs', default=None)
configitem('remotefilelog', 'pullprefetch', default=None)
configitem('remotefilelog', 'backgroundprefetch', default=False)
configitem('remotefilelog', 'prefetchdelay', default=120)
configitem('remotefilelog', 'prefetchdays', default=14)

configitem('remotefilelog', 'getfilesstep', default=10000)
configitem('remotefilelog', 'getfilestype', default='optimistic')
configitem('remotefilelog', 'batchsize', configitems.dynamicdefault)
configitem('remotefilelog', 'fetchwarning', default='')

configitem('remotefilelog', 'includepattern', default=None)
configitem('remotefilelog', 'excludepattern', default=None)

configitem('remotefilelog', 'gcrepack', default=False)
configitem('remotefilelog', 'repackonhggc', default=False)
configitem('repack', 'chainorphansbysize', default=True, experimental=True)

configitem('packs', 'maxpacksize', default=0)
configitem('packs', 'maxchainlen', default=1000)

# default TTL limit is 30 days
_defaultlimit = 60 * 60 * 24 * 30
configitem('remotefilelog', 'nodettl', default=_defaultlimit)

configitem('remotefilelog', 'data.gencountlimit', default=2)
configitem('remotefilelog', 'data.generations',
           default=['1GB', '100MB', '1MB'])
configitem('remotefilelog', 'data.maxrepackpacks', default=50)
configitem('remotefilelog', 'data.repackmaxpacksize', default='4GB')
configitem('remotefilelog', 'data.repacksizelimit', default='100MB')

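# Illustrative note: with the default data.generations value above,
# ['1GB', '100MB', '1MB'], pack files fall into four size buckets:
# [0, 1MB), [1MB, 100MB), [100MB, 1GB) and [1GB, infinity), so that an
# incremental repack can consolidate similarly sized packs together.
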
configitem('remotefilelog', 'history.gencountlimit', default=2)
configitem('remotefilelog', 'history.generations', default=['100MB'])
configitem('remotefilelog', 'history.maxrepackpacks', default=50)
configitem('remotefilelog', 'history.repackmaxpacksize', default='400MB')
configitem('remotefilelog', 'history.repacksizelimit', default='100MB')

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

repoclass = localrepo.localrepository
repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)

isenabled = shallowutil.isenabled

def uisetup(ui):
    """Wraps user facing Mercurial commands to swap them out with shallow
    versions.
    """
    hg.wirepeersetupfuncs.append(fileserverclient.peersetup)

    entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow)
    entry[1].append(('', 'shallow', None,
                     _("create a shallow clone which uses remote file "
                       "history")))

    extensions.wrapcommand(commands.table, 'debugindex',
                           debugcommands.debugindex)
    extensions.wrapcommand(commands.table, 'debugindexdot',
                           debugcommands.debugindexdot)
    extensions.wrapcommand(commands.table, 'log', log)
    extensions.wrapcommand(commands.table, 'pull', pull)

    # Prevent 'hg manifest --all'
    def _manifest(orig, ui, repo, *args, **opts):
        if (isenabled(repo) and opts.get(r'all')):
            raise error.Abort(_("--all is not supported in a shallow repo"))

        return orig(ui, repo, *args, **opts)
    extensions.wrapcommand(commands.table, "manifest", _manifest)

    # Wrap remotefilelog with lfs code
    def _lfsloaded(loaded=False):
        lfsmod = None
        try:
            lfsmod = extensions.find('lfs')
        except KeyError:
            pass
        if lfsmod:
            lfsmod.wrapfilelog(remotefilelog.remotefilelog)
            fileserverclient._lfsmod = lfsmod
    extensions.afterloaded('lfs', _lfsloaded)

    # debugdata needs remotefilelog.len to work
    extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow)

    changegroup.cgpacker = shallowbundle.shallowcg1packer

    extensions.wrapfunction(changegroup, '_addchangegroupfiles',
                            shallowbundle.addchangegroupfiles)
    extensions.wrapfunction(
        changegroup, 'makechangegroup', shallowbundle.makechangegroup)
    extensions.wrapfunction(localrepo, 'makestore', storewrapper)
    extensions.wrapfunction(exchange, 'pull', exchangepull)
    extensions.wrapfunction(merge, 'applyupdates', applyupdates)
    extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
    extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
    extensions.wrapfunction(scmutil, '_findrenames', findrenames)
    extensions.wrapfunction(copies, '_computeforwardmissing',
                            computeforwardmissing)
    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
    extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
    extensions.wrapfunction(context.changectx, 'filectx', filectx)
    extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
    extensions.wrapfunction(patch, 'trydiff', trydiff)
    extensions.wrapfunction(hg, 'verify', _verify)
    scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)

    # disappointing hacks below
    extensions.wrapfunction(scmutil, 'getrenamedfn', getrenamedfn)
    extensions.wrapfunction(revset, 'filelog', filelogrevset)
    revset.symbols['filelog'] = revset.filelog
    extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)


def cloneshallow(orig, ui, repo, *args, **opts):
    if opts.get(r'shallow'):
        repos = []
        def pull_shallow(orig, self, *args, **kwargs):
            if not isenabled(self):
                repos.append(self.unfiltered())
                # set up the client hooks so the post-clone update works
                setupclient(self.ui, self.unfiltered())

                # setupclient fixed the class on the repo itself
                # but we also need to fix it on the repoview
                if isinstance(self, repoview.repoview):
                    self.__class__.__bases__ = (self.__class__.__bases__[0],
                                                self.unfiltered().__class__)
                self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
                self._writerequirements()

                # Since setupclient hadn't been called, exchange.pull was not
                # wrapped. So we need to manually invoke our version of it.
                return exchangepull(orig, self, *args, **kwargs)
            else:
                return orig(self, *args, **kwargs)
        extensions.wrapfunction(exchange, 'pull', pull_shallow)

        # Wrap the stream logic to add requirements and to pass include/exclude
        # patterns around.
        def setup_streamout(repo, remote):
            # Replace remote.stream_out with a version that sends file
            # patterns.
            def stream_out_shallow(orig):
                caps = remote.capabilities()
                if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
                    opts = {}
                    if repo.includepattern:
                        opts[r'includepattern'] = '\0'.join(repo.includepattern)
                    if repo.excludepattern:
                        opts[r'excludepattern'] = '\0'.join(repo.excludepattern)
                    return remote._callstream('stream_out_shallow', **opts)
                else:
                    return orig()
            extensions.wrapfunction(remote, 'stream_out', stream_out_shallow)
        def stream_wrap(orig, op):
            setup_streamout(op.repo, op.remote)
            return orig(op)
        extensions.wrapfunction(
            streamclone, 'maybeperformlegacystreamclone', stream_wrap)

        def canperformstreamclone(orig, pullop, bundle2=False):
            # remotefilelog is currently incompatible with the
            # bundle2 flavor of streamclones, so force us to use
            # v1 instead.
            if 'v2' in pullop.remotebundle2caps.get('stream', []):
                pullop.remotebundle2caps['stream'] = [
                    c for c in pullop.remotebundle2caps['stream']
                    if c != 'v2']
            if bundle2:
                return False, None
            supported, requirements = orig(pullop, bundle2=bundle2)
            if requirements is not None:
                requirements.add(constants.SHALLOWREPO_REQUIREMENT)
            return supported, requirements
        extensions.wrapfunction(
            streamclone, 'canperformstreamclone', canperformstreamclone)

    try:
        orig(ui, repo, *args, **opts)
    finally:
        if opts.get(r'shallow'):
            for r in repos:
                if util.safehasattr(r, 'fileservice'):
                    r.fileservice.close()

def debugdatashallow(orig, *args, **kwds):
    oldlen = remotefilelog.remotefilelog.__len__
    try:
        remotefilelog.remotefilelog.__len__ = lambda x: 1
        return orig(*args, **kwds)
    finally:
        remotefilelog.remotefilelog.__len__ = oldlen

def reposetup(ui, repo):
    if not repo.local():
        return

    # put here intentionally because it doesn't work in uisetup
    ui.setconfig('hooks', 'update.prefetch', wcpprefetch)
    ui.setconfig('hooks', 'commit.prefetch', wcpprefetch)

    isserverenabled = ui.configbool('remotefilelog', 'server')
    isshallowclient = isenabled(repo)

    if isserverenabled and isshallowclient:
        raise RuntimeError("Cannot be both a server and shallow client.")

    if isshallowclient:
        setupclient(ui, repo)

    if isserverenabled:
        remotefilelogserver.setupserver(ui, repo)

def setupclient(ui, repo):
    if not isinstance(repo, localrepo.localrepository):
        return

    # Even clients get the server setup since they need to have the
    # wireprotocol endpoints registered.
    remotefilelogserver.onetimesetup(ui)
    onetimeclientsetup(ui)

    shallowrepo.wraprepo(repo)
    repo.store = shallowstore.wrapstore(repo.store)

def storewrapper(orig, requirements, path, vfstype):
    s = orig(requirements, path, vfstype)
    if constants.SHALLOWREPO_REQUIREMENT in requirements:
        s = shallowstore.wrapstore(s)

    return s

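# Illustrative note (not part of the original file): the wrappers below all
# follow the same prefetch pattern -- collect (path, hex filenode) pairs and
# hand them to repo.fileservice.prefetch() so the blobs arrive in one batched
# request before the wrapped operation reads them one at a time. A minimal
# sketch of that pattern, around a hypothetical wrapped function:
def _example_prefetch_wrapper(orig, repo, ctx, paths):
    if isenabled(repo):
        mf = ctx.manifest()
        # one batched round trip instead of one fetch per file
        repo.fileservice.prefetch(
            [(f, hex(mf[f])) for f in paths if f in mf])
    return orig(repo, ctx, paths)
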
# prefetch files before update
def applyupdates(orig, repo, actions, wctx, mctx, overwrite, wantfiledata,
                 labels=None):
    if isenabled(repo):
        manifest = mctx.manifest()
        files = []
        for f, args, msg in actions['g']:
            files.append((f, hex(manifest[f])))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, actions, wctx, mctx, overwrite, wantfiledata,
                labels=labels)

# Prefetch merge checkunknownfiles
def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
                      *args, **kwargs):
    if isenabled(repo):
        files = []
        sparsematch = repo.maybesparsematch(mctx.rev())
        for f, (m, actionargs, msg) in actions.iteritems():
            if sparsematch and not sparsematch(f):
                continue
            if m in ('c', 'dc', 'cm'):
                files.append((f, hex(mctx.filenode(f))))
            elif m == 'dg':
                f2 = actionargs[0]
                files.append((f2, hex(mctx.filenode(f2))))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, wctx, mctx, force, actions, *args, **kwargs)

475 # Prefetch files before status attempts to look at their size and contents
475 # Prefetch files before status attempts to look at their size and contents
476 def checklookup(orig, self, files):
476 def checklookup(orig, self, files):
477 repo = self._repo
477 repo = self._repo
478 if isenabled(repo):
478 if isenabled(repo):
479 prefetchfiles = []
479 prefetchfiles = []
480 for parent in self._parents:
480 for parent in self._parents:
481 for f in files:
481 for f in files:
482 if f in parent:
482 if f in parent:
483 prefetchfiles.append((f, hex(parent.filenode(f))))
483 prefetchfiles.append((f, hex(parent.filenode(f))))
484 # batch fetch the needed files from the server
484 # batch fetch the needed files from the server
485 repo.fileservice.prefetch(prefetchfiles)
485 repo.fileservice.prefetch(prefetchfiles)
486 return orig(self, files)
486 return orig(self, files)
487
487
# Prefetch files for the rename detection logic that compares added and
# removed files
def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
    if isenabled(repo):
        files = []
        pmf = repo['.'].manifest()
        for f in removed:
            if f in pmf:
                files.append((f, hex(pmf[f])))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, matcher, added, removed, *args, **kwargs)

# prefetch files before pathcopies check
def computeforwardmissing(orig, a, b, match=None):
    missing = orig(a, b, match=match)
    repo = a._repo
    if isenabled(repo):
        mb = b.manifest()

        files = []
        sparsematch = repo.maybesparsematch(b.rev())
        if sparsematch:
            sparsemissing = set()
            for f in missing:
                if sparsematch(f):
                    files.append((f, hex(mb[f])))
                    sparsemissing.add(f)
            missing = sparsemissing

        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return missing

# close cache miss server connection after the command has finished
def runcommand(orig, lui, repo, *args, **kwargs):
    fileservice = None
    # repo can be None when running in chg:
    # - at startup, reposetup was called because serve is not norepo
    # - a norepo command like "help" is called
    if repo and isenabled(repo):
        fileservice = repo.fileservice
    try:
        return orig(lui, repo, *args, **kwargs)
    finally:
        if fileservice:
            fileservice.close()

# prevent strip from stripping remotefilelogs
def _collectbrokencsets(orig, repo, files, striprev):
    if isenabled(repo):
        files = [f for f in files if not repo.shallowmatch(f)]
    return orig(repo, files, striprev)

# changectx wrappers
def filectx(orig, self, path, fileid=None, filelog=None):
    if fileid is None:
        fileid = self.filenode(path)
    if (isenabled(self._repo) and self._repo.shallowmatch(path)):
        return remotefilectx.remotefilectx(self._repo, path, fileid=fileid,
                                           changectx=self, filelog=filelog)
    return orig(self, path, fileid=fileid, filelog=filelog)

def workingfilectx(orig, self, path, filelog=None):
    if (isenabled(self._repo) and self._repo.shallowmatch(path)):
        return remotefilectx.remoteworkingfilectx(self._repo, path,
                                                  workingctx=self,
                                                  filelog=filelog)
    return orig(self, path, filelog=filelog)

# prefetch required revisions before a diff
def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, *args, **kwargs):
    if isenabled(repo):
        prefetch = []
        mf1 = ctx1.manifest()
        for fname in modified + added + removed:
            if fname in mf1:
                fnode = getfilectx(fname, ctx1).filenode()
                # fnode can be None if it's an edited working ctx file
                if fnode:
                    prefetch.append((fname, hex(fnode)))
            if fname not in removed:
                fnode = getfilectx(fname, ctx2).filenode()
                if fnode:
                    prefetch.append((fname, hex(fnode)))

        repo.fileservice.prefetch(prefetch)

    return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy,
                getfilectx, *args, **kwargs)

# Prevent verify from processing files
# a stub for mercurial.hg.verify()
def _verify(orig, repo, level=None):
    lock = repo.lock()
    try:
        return shallowverifier.shallowverifier(repo).verify()
    finally:
        lock.release()


clientonetime = False
def onetimeclientsetup(ui):
    global clientonetime
    if clientonetime:
        return
    clientonetime = True

    # Don't commit filelogs until we know the commit hash, since the hash
    # is present in the filelog blob.
    # This violates Mercurial's filelog->manifest->changelog write order,
    # but is generally fine for client repos.
    pendingfilecommits = []
    def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node,
                       flags, cachedelta=None, _metatuple=None):
        if isinstance(link, int):
            pendingfilecommits.append(
                (self, rawtext, transaction, link, p1, p2, node, flags,
                 cachedelta, _metatuple))
            return node
        else:
            return orig(self, rawtext, transaction, link, p1, p2, node, flags,
                        cachedelta, _metatuple=_metatuple)
    extensions.wrapfunction(
        remotefilelog.remotefilelog, 'addrawrevision', addrawrevision)

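    # Once the changelog entry is actually written and its node is known,
    # the wrapper below flushes the file revisions queued above, resolving
    # each queued integer linkrev to the final changeset node.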
    def changelogadd(orig, self, *args):
        oldlen = len(self)
        node = orig(self, *args)
        newlen = len(self)
        if oldlen != newlen:
            for oldargs in pendingfilecommits:
                log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
                linknode = self.node(link)
                if linknode == node:
                    log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
                else:
                    raise error.ProgrammingError(
                        'pending multiple integer revisions are not supported')
        else:
            # "link" is actually wrong here (it is set to len(changelog));
            # if the changelog remains unchanged, skip writing file revisions,
            # but still do a sanity check about pending multiple revisions
            if len(set(x[3] for x in pendingfilecommits)) > 1:
                raise error.ProgrammingError(
                    'pending multiple integer revisions are not supported')
        del pendingfilecommits[:]
        return node
    extensions.wrapfunction(changelog.changelog, 'add', changelogadd)

def getrenamedfn(orig, repo, endrev=None):
    if not isenabled(repo) or copies.usechangesetcentricalgo(repo):
        return orig(repo, endrev)

    rcache = {}

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if rev in rcache.setdefault(fn, {}):
            return rcache[fn][rev]

        try:
            fctx = repo[rev].filectx(fn)
            for ancestor in fctx.ancestors():
                if ancestor.path() == fn:
                    renamed = ancestor.renamed()
                    rcache[fn][ancestor.rev()] = renamed and renamed[0]

            renamed = fctx.renamed()
            return renamed and renamed[0]
        except error.LookupError:
            return None

    return getrenamed

def walkfilerevs(orig, repo, match, follow, revs, fncache):
    if not isenabled(repo):
        return orig(repo, match, follow, revs, fncache)

    # remotefilelogs can't be walked in rev order, so throw.
    # The caller will see the exception and walk the commit tree instead.
    if not follow:
        raise cmdutil.FileWalkError("Cannot walk via filelog")

    wanted = set()
    minrev, maxrev = min(revs), max(revs)

    pctx = repo['.']
    for filename in match.files():
        if filename not in pctx:
            raise error.Abort(_('cannot follow file not in parent '
                                'revision: "%s"') % filename)
        fctx = pctx[filename]

        linkrev = fctx.linkrev()
        if linkrev >= minrev and linkrev <= maxrev:
            fncache.setdefault(linkrev, []).append(filename)
            wanted.add(linkrev)

        for ancestor in fctx.ancestors():
            linkrev = ancestor.linkrev()
            if linkrev >= minrev and linkrev <= maxrev:
                fncache.setdefault(linkrev, []).append(ancestor.path())
                wanted.add(linkrev)

    return wanted

def filelogrevset(orig, repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, ``filelog()`` does not show every changeset
    that affects the requested file(s). See :hg:`help log` for details. For
    a slower, more accurate result, use ``file()``.
    """

    if not isenabled(repo):
        return orig(repo, subset, x)

    # i18n: "filelog" is a keyword
    pat = revset.getstring(x, _("filelog requires a pattern"))
    m = match.match(repo.root, repo.getcwd(), [pat], default='relpath',
                    ctx=repo[None])
    s = set()

    if not match.patkind(pat):
        # slow
        for r in subset:
            ctx = repo[r]
            cfiles = ctx.files()
            for f in m.files():
                if f in cfiles:
                    s.add(ctx.rev())
                    break
    else:
        # partial
        files = (f for f in repo[None] if m(f))
        for f in files:
            fctx = repo[None].filectx(f)
            s.add(fctx.linkrev())
            for actx in fctx.ancestors():
                s.add(actx.linkrev())

    return smartset.baseset([r for r in subset if r in s])

@command('gc', [], _('hg gc [REPO...]'), norepo=True)
def gc(ui, *args, **opts):
    '''garbage collect the client and server filelog caches
    '''
    cachepaths = set()

    # get the system client cache
    systemcache = shallowutil.getcachepath(ui, allowempty=True)
    if systemcache:
        cachepaths.add(systemcache)

    # get repo client and server cache
    repopaths = []
    pwd = ui.environ.get('PWD')
    if pwd:
        repopaths.append(pwd)

    repopaths.extend(args)
    repos = []
    for repopath in repopaths:
        try:
            repo = hg.peer(ui, {}, repopath)
            repos.append(repo)

            repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
            if repocache:
                cachepaths.add(repocache)
        except error.RepoError:
            pass

    # gc client cache
    for cachepath in cachepaths:
        gcclient(ui, cachepath)

    # gc server cache
    for repo in repos:
        remotefilelogserver.gcserver(ui, repo._repo)

def gcclient(ui, cachepath):
    # get list of repos that use this cache
    repospath = os.path.join(cachepath, 'repos')
    if not os.path.exists(repospath):
        ui.warn(_("no known cache at %s\n") % cachepath)
        return

    reposfile = open(repospath, 'rb')
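    # each line of the 'repos' file is a repository path (see the
    # write-back further down); r[:-1] strips the trailing newline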
    repos = {r[:-1] for r in reposfile.readlines()}
    reposfile.close()

    # build list of useful files
    validrepos = []
    keepkeys = set()

    sharedcache = None
    filesrepacked = False

    count = 0
    progress = ui.makeprogress(_("analyzing repositories"), unit="repos",
                               total=len(repos))
    for path in repos:
        progress.update(count)
        count += 1
        try:
            path = ui.expandpath(os.path.normpath(path))
        except TypeError as e:
            ui.warn(_("warning: malformed path: %r:%s\n") % (path, e))
            traceback.print_exc()
            continue
        try:
            peer = hg.peer(ui, {}, path)
            repo = peer._repo
        except error.RepoError:
            continue

        validrepos.append(path)

        # Protect against any repo or config changes that have happened since
        # this repo was added to the repos file. We'd rather this loop succeed
        # and too much be deleted, than the loop fail and nothing gets deleted.
        if not isenabled(repo):
            continue

        if not util.safehasattr(repo, 'name'):
            ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path)
            continue

        # If garbage collection on repack and repack on hg gc are enabled
        # then loose files are repacked and garbage collected.
        # Otherwise regular garbage collection is performed.
        repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc')
        gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack')
        if repackonhggc and gcrepack:
            try:
                repackmod.incrementalrepack(repo)
                filesrepacked = True
                continue
            except (IOError, repackmod.RepackAlreadyRunning):
                # If repack cannot be performed due to insufficient disk
                # space, continue doing garbage collection of loose files
                # without repacking
                pass

        reponame = repo.name
        if not sharedcache:
            sharedcache = repo.sharedstore

        # Compute a keepset which is not garbage collected
        def keyfn(fname, fnode):
            return fileserverclient.getcachekey(reponame, fname, hex(fnode))
        keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)

    progress.complete()

    # write list of valid repos back
    oldumask = os.umask(0o002)
    try:
        reposfile = open(repospath, 'wb')
        reposfile.writelines([("%s\n" % r) for r in validrepos])
        reposfile.close()
    finally:
        os.umask(oldumask)

    # prune cache
    if sharedcache is not None:
        sharedcache.gc(keepkeys)
    elif not filesrepacked:
        ui.warn(_("warning: no valid repos in repofile\n"))

def log(orig, ui, repo, *pats, **opts):
    if not isenabled(repo):
        return orig(ui, repo, *pats, **opts)

    follow = opts.get(r'follow')
    revs = opts.get(r'rev')
    if pats:
        # Force slowpath for non-follow patterns and follows that start from
        # non-working-copy-parent revs.
        if not follow or revs:
            # This forces the slowpath
            opts[r'removed'] = True

        # If this is a non-follow log without any revs specified, recommend
        # that the user add -f to speed it up.
        if not follow and not revs:
            match = scmutil.match(repo['.'], pats, pycompat.byteskwargs(opts))
            isfile = not match.anypats()
            if isfile:
                for file in match.files():
                    if not os.path.isfile(repo.wjoin(file)):
                        isfile = False
                        break

            if isfile:
                ui.warn(_("warning: file log can be slow on large repos - " +
                          "use -f to speed it up\n"))

    return orig(ui, repo, *pats, **opts)

def revdatelimit(ui, revset):
    """Update revset so that only changesets no older than 'prefetchdays' days
    are included. The default value is 14 days. If 'prefetchdays' is set to
    zero or a negative value then the date restriction is not applied.
    """
    days = ui.configint('remotefilelog', 'prefetchdays')
    if days > 0:
        revset = '(%s) & date(-%s)' % (revset, days)
    return revset
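
# Example: with the default prefetchdays of 14, revdatelimit(ui, 'draft()')
# returns '(draft()) & date(-14)'; zero or a negative value leaves the
# revset unchanged.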

def readytofetch(repo):
    """Check that enough time has passed since the last background prefetch.
    This only relates to prefetches after operations that change the working
    copy parent. The default delay between background prefetches is 2 minutes.
    """
    timeout = repo.ui.configint('remotefilelog', 'prefetchdelay')
    fname = repo.vfs.join('lastprefetch')

    ready = False
    with open(fname, 'a'):
        # the with construct above is used to avoid race conditions
        modtime = os.path.getmtime(fname)
        if (time.time() - modtime) > timeout:
            os.utime(fname, None)
            ready = True

    return ready

def wcpprefetch(ui, repo, **kwargs):
    """Prefetches, in the background, the revisions specified by the
    bgprefetchrevs revset. Does a background repack if the backgroundrepack
    config flag is set.
    """
    shallow = isenabled(repo)
    bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs')
    isready = readytofetch(repo)

    if not (shallow and bgprefetchrevs and isready):
        return

    bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
    # update a revset with a date limit
    bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)

    def anon():
        if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch:
            return
        repo.ranprefetch = True
        repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)

    repo._afterlock(anon)

def pull(orig, ui, repo, *pats, **opts):
    result = orig(ui, repo, *pats, **opts)

    if isenabled(repo):
        # prefetch if it's configured
        prefetchrevset = ui.config('remotefilelog', 'pullprefetch')
        bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
        bgprefetch = repo.ui.configbool('remotefilelog', 'backgroundprefetch')

        if prefetchrevset:
            ui.status(_("prefetching file contents\n"))
            revs = scmutil.revrange(repo, [prefetchrevset])
            base = repo['.'].rev()
            if bgprefetch:
                repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
            else:
                repo.prefetch(revs, base=base)
                if bgrepack:
                    repackmod.backgroundrepack(repo, incremental=True)
        elif bgrepack:
            repackmod.backgroundrepack(repo, incremental=True)

    return result

def exchangepull(orig, repo, remote, *args, **kwargs):
    # Hook into the callstream/getbundle to insert bundle capabilities
    # during a pull.
    def localgetbundle(orig, source, heads=None, common=None, bundlecaps=None,
                       **kwargs):
        if not bundlecaps:
            bundlecaps = set()
        bundlecaps.add(constants.BUNDLE2_CAPABLITY)
        return orig(source, heads=heads, common=common, bundlecaps=bundlecaps,
                    **kwargs)

    if util.safehasattr(remote, '_callstream'):
        remote._localrepo = repo
    elif util.safehasattr(remote, 'getbundle'):
        extensions.wrapfunction(remote, 'getbundle', localgetbundle)

    return orig(repo, remote, *args, **kwargs)

def _fileprefetchhook(repo, revs, match):
    if isenabled(repo):
        allfiles = []
        for rev in revs:
            if rev == nodemod.wdirrev or rev is None:
                continue
            ctx = repo[rev]
            mf = ctx.manifest()
            sparsematch = repo.maybesparsematch(ctx.rev())
            for path in ctx.walk(match):
                if (not sparsematch or sparsematch(path)) and path in mf:
                    allfiles.append((path, hex(mf[path])))
        repo.fileservice.prefetch(allfiles)

@command('debugremotefilelog', [
    ('d', 'decompress', None, _('decompress the filelog first')),
    ], _('hg debugremotefilelog <path>'), norepo=True)
def debugremotefilelog(ui, path, **opts):
    return debugcommands.debugremotefilelog(ui, path, **opts)

@command('verifyremotefilelog', [
    ('d', 'decompress', None, _('decompress the filelogs first')),
    ], _('hg verifyremotefilelogs <directory>'), norepo=True)
def verifyremotefilelog(ui, path, **opts):
    return debugcommands.verifyremotefilelog(ui, path, **opts)

@command('debugdatapack', [
    ('', 'long', None, _('print the long hashes')),
    ('', 'node', '', _('dump the contents of node'), 'NODE'),
    ], _('hg debugdatapack <paths>'), norepo=True)
def debugdatapack(ui, *paths, **opts):
    return debugcommands.debugdatapack(ui, *paths, **opts)

@command('debughistorypack', [
    ], _('hg debughistorypack <path>'), norepo=True)
def debughistorypack(ui, path, **opts):
    return debugcommands.debughistorypack(ui, path)

@command('debugkeepset', [
    ], _('hg debugkeepset'))
def debugkeepset(ui, repo, **opts):
    # The command is used to measure keepset computation time
    def keyfn(fname, fnode):
        return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
    repackmod.keepset(repo, keyfn)
    return

@command('debugwaitonrepack', [
    ], _('hg debugwaitonrepack'))
def debugwaitonrepack(ui, repo, **opts):
    return debugcommands.debugwaitonrepack(repo)

@command('debugwaitonprefetch', [
    ], _('hg debugwaitonprefetch'))
def debugwaitonprefetch(ui, repo, **opts):
    return debugcommands.debugwaitonprefetch(repo)

def resolveprefetchopts(ui, opts):
    if not opts.get('rev'):
        revset = ['.', 'draft()']

        prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None)
        if prefetchrevset:
            revset.append('(%s)' % prefetchrevset)
        bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None)
        if bgprefetchrevs:
            revset.append('(%s)' % bgprefetchrevs)
        revset = '+'.join(revset)

        # update a revset with a date limit
        revset = revdatelimit(ui, revset)

        opts['rev'] = [revset]

    if not opts.get('base'):
        opts['base'] = None

    return opts
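
# Example: with neither pullprefetch nor bgprefetchrevs configured, the
# implied revset is '.+draft()', which revdatelimit() turns into
# '(.+draft()) & date(-14)' under the default prefetchdays of 14.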

@command('prefetch', [
    ('r', 'rev', [], _('prefetch the specified revisions'), _('REV')),
    ('', 'repack', False, _('run repack after prefetch')),
    ('b', 'base', '', _("rev that is assumed to already be local")),
    ] + commands.walkopts, _('hg prefetch [OPTIONS] [FILE...]'))
def prefetch(ui, repo, *pats, **opts):
    """prefetch file revisions from the server

    Prefetches file revisions for the specified revs and stores them in the
    local remotefilelog cache. If no rev is specified, the default rev is
    used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
    File names or patterns can be used to limit which files are downloaded.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    if not isenabled(repo):
        raise error.Abort(_("repo is not shallow"))

    opts = resolveprefetchopts(ui, opts)
    revs = scmutil.revrange(repo, opts.get('rev'))
    repo.prefetch(revs, opts.get('base'), pats, opts)

    # Run repack in background
    if opts.get('repack'):
        repackmod.backgroundrepack(repo, incremental=True)

@command('repack', [
    ('', 'background', None, _('run in a background process'), None),
    ('', 'incremental', None, _('do an incremental repack'), None),
    ('', 'packsonly', None, _('only repack packs (skip loose objects)'), None),
    ], _('hg repack [OPTIONS]'))
def repack_(ui, repo, *pats, **opts):
    if opts.get(r'background'):
        repackmod.backgroundrepack(repo, incremental=opts.get(r'incremental'),
                                   packsonly=opts.get(r'packsonly', False))
        return

    options = {'packsonly': opts.get(r'packsonly')}

    try:
        if opts.get(r'incremental'):
            repackmod.incrementalrepack(repo, options=options)
        else:
            repackmod.fullrepack(repo, options=options)
    except repackmod.RepackAlreadyRunning as ex:
        # Don't propagate the exception if the repack is already in
        # progress, since we want the command to exit 0.
        repo.ui.warn('%s\n' % ex)
@@ -1,1174 +1,1175 @@
# sqlitestore.py - Storage backend that uses SQLite
#
# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""store repository data in SQLite (EXPERIMENTAL)

The sqlitestore extension enables the storage of repository data in SQLite.

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

In addition, several features are not yet supported or have known bugs:

* Only some data is stored in SQLite. Changeset, manifest, and other repository
  data is not yet stored in SQLite.
* Transactions are not robust. If the process is aborted at the right time
  during transaction close/rollback, the repository could be in an inconsistent
  state. This problem will diminish once all repository data is tracked by
  SQLite.
* Bundle repositories do not work (the ability to use e.g.
  `hg -R <bundle-file> log` to automatically overlay a bundle on top of the
  existing repository).
* Various other features don't work.

This extension should work for basic clone/pull, update, and commit workflows.
Some history rewriting operations may fail due to lack of support for bundle
repositories.

To use, activate the extension and set the ``storage.new-repo-backend`` config
option to ``sqlite`` to enable new repositories to use SQLite for storage.
"""

# To run the test suite with repos using SQLite by default, execute the
# following:
#
# HGREPOFEATURES="sqlitestore" run-tests.py \
#     --extra-config-opt extensions.sqlitestore= \
#     --extra-config-opt storage.new-repo-backend=sqlite

from __future__ import absolute_import

import hashlib
import sqlite3
import struct
import threading
import zlib

from mercurial.i18n import _
from mercurial.node import (
    nullid,
    nullrev,
    short,
)
from mercurial.thirdparty import (
    attr,
)
from mercurial import (
    ancestor,
    dagop,
    encoding,
    error,
    extensions,
    localrepo,
    mdiff,
    pycompat,
    registrar,
    repository,
    util,
    verify,
)
from mercurial.utils import (
    interfaceutil,
    storageutil,
)

try:
    from mercurial import zstd
    zstd.__version__
except ImportError:
    zstd = None

configtable = {}
configitem = registrar.configitem(configtable)

# experimental config: storage.sqlite.compression
configitem('storage', 'sqlite.compression',
           default='zstd' if zstd else 'zlib',
           experimental=True)
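
# For example, a repository creator could force zlib compression via hgrc:
#
#   [storage]
#   sqlite.compression = zlib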

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

REQUIREMENT = b'exp-sqlite-001'
REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
REQUIREMENT_SHALLOW_FILES = b'exp-sqlite-shallow-files'
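
# These strings end up in the repository's requires file, following
# Mercurial's standard requirements mechanism: a Mercurial without matching
# support refuses to open the repository, which is how the version lock
# described in the module docstring is enforced.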

CURRENT_SCHEMA_VERSION = 1

COMPRESSION_NONE = 1
COMPRESSION_ZSTD = 2
COMPRESSION_ZLIB = 3

FLAG_CENSORED = 1
FLAG_MISSING_P1 = 2
FLAG_MISSING_P2 = 4

CREATE_SCHEMA = [
    # Deltas are stored as content-indexed blobs.
    # compression column holds COMPRESSION_* constant for how the
    # delta is encoded.

    r'CREATE TABLE delta ('
    r'    id INTEGER PRIMARY KEY, '
    r'    compression INTEGER NOT NULL, '
    r'    hash BLOB UNIQUE ON CONFLICT ABORT, '
    r'    delta BLOB NOT NULL '
    r')',

    # Tracked paths are denormalized to integers to avoid redundant
    # storage of the path name.
    r'CREATE TABLE filepath ('
    r'    id INTEGER PRIMARY KEY, '
    r'    path BLOB NOT NULL '
    r')',

    r'CREATE UNIQUE INDEX filepath_path '
    r'    ON filepath (path)',

    # We have a single table for all file revision data.
    # Each file revision is uniquely described by a (path, rev) and
    # (path, node).
    #
    # Revision data is stored as a pointer to the delta producing this
    # revision and the file revision whose delta should be applied before
    # that one. One can reconstruct the delta chain by recursively following
    # the delta base revision pointers until one encounters NULL.
    #
    # flags column holds bitwise integer flags controlling storage options.
    # These flags are defined by the FLAG_* constants.
    r'CREATE TABLE fileindex ('
    r'    id INTEGER PRIMARY KEY, '
    r'    pathid INTEGER REFERENCES filepath(id), '
    r'    revnum INTEGER NOT NULL, '
    r'    p1rev INTEGER NOT NULL, '
    r'    p2rev INTEGER NOT NULL, '
    r'    linkrev INTEGER NOT NULL, '
    r'    flags INTEGER NOT NULL, '
    r'    deltaid INTEGER REFERENCES delta(id), '
    r'    deltabaseid INTEGER REFERENCES fileindex(id), '
    r'    node BLOB NOT NULL '
    r')',

    r'CREATE UNIQUE INDEX fileindex_pathrevnum '
    r'    ON fileindex (pathid, revnum)',

    r'CREATE UNIQUE INDEX fileindex_pathnode '
    r'    ON fileindex (pathid, node)',

    # Provide a view over all file data for convenience.
    r'CREATE VIEW filedata AS '
    r'SELECT '
    r'    fileindex.id AS id, '
    r'    filepath.id AS pathid, '
    r'    filepath.path AS path, '
    r'    fileindex.revnum AS revnum, '
    r'    fileindex.node AS node, '
    r'    fileindex.p1rev AS p1rev, '
    r'    fileindex.p2rev AS p2rev, '
    r'    fileindex.linkrev AS linkrev, '
    r'    fileindex.flags AS flags, '
    r'    fileindex.deltaid AS deltaid, '
    r'    fileindex.deltabaseid AS deltabaseid '
    r'FROM filepath, fileindex '
    r'WHERE fileindex.pathid=filepath.id',

    r'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
]
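
# Illustration of the delta-chain layout: if fileindex row 3 has deltaid=7
# and deltabaseid=2, and row 2 has deltaid=5 and deltabaseid=NULL, then the
# full text for row 3 is delta 5 (the chain-terminating base text) with
# delta 7 applied on top. resolvedeltachain() below walks exactly this
# pointer structure with a recursive CTE.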

def resolvedeltachain(db, pathid, node, revisioncache,
                      stoprids, zstddctx=None):
    """Resolve a delta chain for a file node."""

    # TODO the "not in ({stops})" here is possibly slowing down the query
    # because it needs to perform the lookup on every recursive invocation.
    # This could possibly be faster if we created a temporary query with
    # baseid "poisoned" to null and limited the recursive filter to
    # "is not null".
    res = db.execute(
        r'WITH RECURSIVE '
        r'    deltachain(deltaid, baseid) AS ('
        r'        SELECT deltaid, deltabaseid FROM fileindex '
        r'            WHERE pathid=? AND node=? '
        r'        UNION ALL '
        r'        SELECT fileindex.deltaid, deltabaseid '
        r'            FROM fileindex, deltachain '
        r'            WHERE '
        r'                fileindex.id=deltachain.baseid '
        r'                AND deltachain.baseid IS NOT NULL '
        r'                AND fileindex.id NOT IN ({stops}) '
        r'    ) '
        r'SELECT deltachain.baseid, compression, delta '
        r'FROM deltachain, delta '
        r'WHERE delta.id=deltachain.deltaid'.format(
            stops=r','.join([r'?'] * len(stoprids))),
        tuple([pathid, node] + list(stoprids.keys())))
216
217
217 deltas = []
218 deltas = []
218 lastdeltabaseid = None
219 lastdeltabaseid = None
219
220
220 for deltabaseid, compression, delta in res:
221 for deltabaseid, compression, delta in res:
221 lastdeltabaseid = deltabaseid
222 lastdeltabaseid = deltabaseid
222
223
223 if compression == COMPRESSION_ZSTD:
224 if compression == COMPRESSION_ZSTD:
224 delta = zstddctx.decompress(delta)
225 delta = zstddctx.decompress(delta)
225 elif compression == COMPRESSION_NONE:
226 elif compression == COMPRESSION_NONE:
226 delta = delta
227 delta = delta
227 elif compression == COMPRESSION_ZLIB:
228 elif compression == COMPRESSION_ZLIB:
228 delta = zlib.decompress(delta)
229 delta = zlib.decompress(delta)
229 else:
230 else:
230 raise SQLiteStoreError('unhandled compression type: %d' %
231 raise SQLiteStoreError('unhandled compression type: %d' %
231 compression)
232 compression)
232
233
233 deltas.append(delta)
234 deltas.append(delta)
234
235
235 if lastdeltabaseid in stoprids:
236 if lastdeltabaseid in stoprids:
236 basetext = revisioncache[stoprids[lastdeltabaseid]]
237 basetext = revisioncache[stoprids[lastdeltabaseid]]
237 else:
238 else:
238 basetext = deltas.pop()
239 basetext = deltas.pop()
239
240
240 deltas.reverse()
241 deltas.reverse()
241 fulltext = mdiff.patches(basetext, deltas)
242 fulltext = mdiff.patches(basetext, deltas)
242
243
243 # SQLite returns buffer instances for blob columns on Python 2. This
244 # SQLite returns buffer instances for blob columns on Python 2. This
244 # type can propagate through the delta application layer. Because
245 # type can propagate through the delta application layer. Because
245 # downstream callers assume revisions are bytes, cast as needed.
246 # downstream callers assume revisions are bytes, cast as needed.
246 if not isinstance(fulltext, bytes):
247 if not isinstance(fulltext, bytes):
247 fulltext = bytes(fulltext)
248 fulltext = bytes(fulltext)
248
249
249 return fulltext
250 return fulltext
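# Editor's sketch (illustrative, relying on mdiff's documented patch
# semantics): the recursive query returns deltas newest-first, so after
# deltas.reverse() a chain v1 <- v2 <- v3 is replayed as:
#
#   basetext = b'v1 fulltext'
#   deltas = [delta_v1_to_v2, delta_v2_to_v3]
#   fulltext = mdiff.patches(basetext, deltas)   # reconstructs v3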
250
251
251 def insertdelta(db, compression, hash, delta):
252 def insertdelta(db, compression, hash, delta):
252 try:
253 try:
253 return db.execute(
254 return db.execute(
254 r'INSERT INTO delta (compression, hash, delta) '
255 r'INSERT INTO delta (compression, hash, delta) '
255 r'VALUES (?, ?, ?)',
256 r'VALUES (?, ?, ?)',
256 (compression, hash, delta)).lastrowid
257 (compression, hash, delta)).lastrowid
257 except sqlite3.IntegrityError:
258 except sqlite3.IntegrityError:
258 return db.execute(
259 return db.execute(
259 r'SELECT id FROM delta WHERE hash=?',
260 r'SELECT id FROM delta WHERE hash=?',
260 (hash,)).fetchone()[0]
261 (hash,)).fetchone()[0]
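# Example (editor's sketch, assuming the UNIQUE constraint on delta.hash that
# the IntegrityError handling implies): inserting the same delta twice yields
# the same row id:
#
#   rid1 = insertdelta(db, COMPRESSION_NONE, deltahash, deltablob)
#   rid2 = insertdelta(db, COMPRESSION_NONE, deltahash, deltablob)
#   assert rid1 == rid2   # second call falls back to the SELECT branch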
261
262
262 class SQLiteStoreError(error.StorageError):
263 class SQLiteStoreError(error.StorageError):
263 pass
264 pass
264
265
265 @attr.s
266 @attr.s
266 class revisionentry(object):
267 class revisionentry(object):
267 rid = attr.ib()
268 rid = attr.ib()
268 rev = attr.ib()
269 rev = attr.ib()
269 node = attr.ib()
270 node = attr.ib()
270 p1rev = attr.ib()
271 p1rev = attr.ib()
271 p2rev = attr.ib()
272 p2rev = attr.ib()
272 p1node = attr.ib()
273 p1node = attr.ib()
273 p2node = attr.ib()
274 p2node = attr.ib()
274 linkrev = attr.ib()
275 linkrev = attr.ib()
275 flags = attr.ib()
276 flags = attr.ib()
276
277
277 @interfaceutil.implementer(repository.irevisiondelta)
278 @interfaceutil.implementer(repository.irevisiondelta)
278 @attr.s(slots=True)
279 @attr.s(slots=True)
279 class sqliterevisiondelta(object):
280 class sqliterevisiondelta(object):
280 node = attr.ib()
281 node = attr.ib()
281 p1node = attr.ib()
282 p1node = attr.ib()
282 p2node = attr.ib()
283 p2node = attr.ib()
283 basenode = attr.ib()
284 basenode = attr.ib()
284 flags = attr.ib()
285 flags = attr.ib()
285 baserevisionsize = attr.ib()
286 baserevisionsize = attr.ib()
286 revision = attr.ib()
287 revision = attr.ib()
287 delta = attr.ib()
288 delta = attr.ib()
288 linknode = attr.ib(default=None)
289 linknode = attr.ib(default=None)
289
290
290 @interfaceutil.implementer(repository.iverifyproblem)
291 @interfaceutil.implementer(repository.iverifyproblem)
291 @attr.s(frozen=True)
292 @attr.s(frozen=True)
292 class sqliteproblem(object):
293 class sqliteproblem(object):
293 warning = attr.ib(default=None)
294 warning = attr.ib(default=None)
294 error = attr.ib(default=None)
295 error = attr.ib(default=None)
295 node = attr.ib(default=None)
296 node = attr.ib(default=None)
296
297
297 @interfaceutil.implementer(repository.ifilestorage)
298 @interfaceutil.implementer(repository.ifilestorage)
298 class sqlitefilestore(object):
299 class sqlitefilestore(object):
299 """Implements storage for an individual tracked path."""
300 """Implements storage for an individual tracked path."""
300
301
301 def __init__(self, db, path, compression):
302 def __init__(self, db, path, compression):
302 self._db = db
303 self._db = db
303 self._path = path
304 self._path = path
304
305
305 self._pathid = None
306 self._pathid = None
306
307
307 # revnum -> node
308 # revnum -> node
308 self._revtonode = {}
309 self._revtonode = {}
309 # node -> revnum
310 # node -> revnum
310 self._nodetorev = {}
311 self._nodetorev = {}
311 # node -> data structure
312 # node -> data structure
312 self._revisions = {}
313 self._revisions = {}
313
314
314 self._revisioncache = util.lrucachedict(10)
315 self._revisioncache = util.lrucachedict(10)
315
316
316 self._compengine = compression
317 self._compengine = compression
317
318
318 if compression == 'zstd':
319 if compression == 'zstd':
319 self._cctx = zstd.ZstdCompressor(level=3)
320 self._cctx = zstd.ZstdCompressor(level=3)
320 self._dctx = zstd.ZstdDecompressor()
321 self._dctx = zstd.ZstdDecompressor()
321 else:
322 else:
322 self._cctx = None
323 self._cctx = None
323 self._dctx = None
324 self._dctx = None
324
325
325 self._refreshindex()
326 self._refreshindex()
326
327
327 def _refreshindex(self):
328 def _refreshindex(self):
328 self._revtonode = {}
329 self._revtonode = {}
329 self._nodetorev = {}
330 self._nodetorev = {}
330 self._revisions = {}
331 self._revisions = {}
331
332
332 res = list(self._db.execute(
333 res = list(self._db.execute(
333 r'SELECT id FROM filepath WHERE path=?', (self._path,)))
334 r'SELECT id FROM filepath WHERE path=?', (self._path,)))
334
335
335 if not res:
336 if not res:
336 self._pathid = None
337 self._pathid = None
337 return
338 return
338
339
339 self._pathid = res[0][0]
340 self._pathid = res[0][0]
340
341
341 res = self._db.execute(
342 res = self._db.execute(
342 r'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
343 r'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
343 r'FROM fileindex '
344 r'FROM fileindex '
344 r'WHERE pathid=? '
345 r'WHERE pathid=? '
345 r'ORDER BY revnum ASC',
346 r'ORDER BY revnum ASC',
346 (self._pathid,))
347 (self._pathid,))
347
348
348 for i, row in enumerate(res):
349 for i, row in enumerate(res):
349 rid, rev, node, p1rev, p2rev, linkrev, flags = row
350 rid, rev, node, p1rev, p2rev, linkrev, flags = row
350
351
351 if i != rev:
352 if i != rev:
352 raise SQLiteStoreError(_('sqlite database has inconsistent '
353 raise SQLiteStoreError(_('sqlite database has inconsistent '
353 'revision numbers'))
354 'revision numbers'))
354
355
355 if p1rev == nullrev:
356 if p1rev == nullrev:
356 p1node = nullid
357 p1node = nullid
357 else:
358 else:
358 p1node = self._revtonode[p1rev]
359 p1node = self._revtonode[p1rev]
359
360
360 if p2rev == nullrev:
361 if p2rev == nullrev:
361 p2node = nullid
362 p2node = nullid
362 else:
363 else:
363 p2node = self._revtonode[p2rev]
364 p2node = self._revtonode[p2rev]
364
365
365 entry = revisionentry(
366 entry = revisionentry(
366 rid=rid,
367 rid=rid,
367 rev=rev,
368 rev=rev,
368 node=node,
369 node=node,
369 p1rev=p1rev,
370 p1rev=p1rev,
370 p2rev=p2rev,
371 p2rev=p2rev,
371 p1node=p1node,
372 p1node=p1node,
372 p2node=p2node,
373 p2node=p2node,
373 linkrev=linkrev,
374 linkrev=linkrev,
374 flags=flags)
375 flags=flags)
375
376
376 self._revtonode[rev] = node
377 self._revtonode[rev] = node
377 self._nodetorev[node] = rev
378 self._nodetorev[node] = rev
378 self._revisions[node] = entry
379 self._revisions[node] = entry
379
380
380 # Start of ifileindex interface.
381 # Start of ifileindex interface.
381
382
382 def __len__(self):
383 def __len__(self):
383 return len(self._revisions)
384 return len(self._revisions)
384
385
385 def __iter__(self):
386 def __iter__(self):
386 return iter(pycompat.xrange(len(self._revisions)))
387 return iter(pycompat.xrange(len(self._revisions)))
387
388
388 def hasnode(self, node):
389 def hasnode(self, node):
389 if node == nullid:
390 if node == nullid:
390 return False
391 return False
391
392
392 return node in self._nodetorev
393 return node in self._nodetorev
393
394
394 def revs(self, start=0, stop=None):
395 def revs(self, start=0, stop=None):
395 return storageutil.iterrevs(len(self._revisions), start=start,
396 return storageutil.iterrevs(len(self._revisions), start=start,
396 stop=stop)
397 stop=stop)
397
398
398 def parents(self, node):
399 def parents(self, node):
399 if node == nullid:
400 if node == nullid:
400 return nullid, nullid
401 return nullid, nullid
401
402
402 if node not in self._revisions:
403 if node not in self._revisions:
403 raise error.LookupError(node, self._path, _('no node'))
404 raise error.LookupError(node, self._path, _('no node'))
404
405
405 entry = self._revisions[node]
406 entry = self._revisions[node]
406 return entry.p1node, entry.p2node
407 return entry.p1node, entry.p2node
407
408
408 def parentrevs(self, rev):
409 def parentrevs(self, rev):
409 if rev == nullrev:
410 if rev == nullrev:
410 return nullrev, nullrev
411 return nullrev, nullrev
411
412
412 if rev not in self._revtonode:
413 if rev not in self._revtonode:
413 raise IndexError(rev)
414 raise IndexError(rev)
414
415
415 entry = self._revisions[self._revtonode[rev]]
416 entry = self._revisions[self._revtonode[rev]]
416 return entry.p1rev, entry.p2rev
417 return entry.p1rev, entry.p2rev
417
418
418 def rev(self, node):
419 def rev(self, node):
419 if node == nullid:
420 if node == nullid:
420 return nullrev
421 return nullrev
421
422
422 if node not in self._nodetorev:
423 if node not in self._nodetorev:
423 raise error.LookupError(node, self._path, _('no node'))
424 raise error.LookupError(node, self._path, _('no node'))
424
425
425 return self._nodetorev[node]
426 return self._nodetorev[node]
426
427
427 def node(self, rev):
428 def node(self, rev):
428 if rev == nullrev:
429 if rev == nullrev:
429 return nullid
430 return nullid
430
431
431 if rev not in self._revtonode:
432 if rev not in self._revtonode:
432 raise IndexError(rev)
433 raise IndexError(rev)
433
434
434 return self._revtonode[rev]
435 return self._revtonode[rev]
435
436
436 def lookup(self, node):
437 def lookup(self, node):
437 return storageutil.fileidlookup(self, node, self._path)
438 return storageutil.fileidlookup(self, node, self._path)
438
439
439 def linkrev(self, rev):
440 def linkrev(self, rev):
440 if rev == nullrev:
441 if rev == nullrev:
441 return nullrev
442 return nullrev
442
443
443 if rev not in self._revtonode:
444 if rev not in self._revtonode:
444 raise IndexError(rev)
445 raise IndexError(rev)
445
446
446 entry = self._revisions[self._revtonode[rev]]
447 entry = self._revisions[self._revtonode[rev]]
447 return entry.linkrev
448 return entry.linkrev
448
449
449 def iscensored(self, rev):
450 def iscensored(self, rev):
450 if rev == nullrev:
451 if rev == nullrev:
451 return False
452 return False
452
453
453 if rev not in self._revtonode:
454 if rev not in self._revtonode:
454 raise IndexError(rev)
455 raise IndexError(rev)
455
456
456 return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED
457 return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED
457
458
458 def commonancestorsheads(self, node1, node2):
459 def commonancestorsheads(self, node1, node2):
459 rev1 = self.rev(node1)
460 rev1 = self.rev(node1)
460 rev2 = self.rev(node2)
461 rev2 = self.rev(node2)
461
462
462 ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
463 ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
463 return pycompat.maplist(self.node, ancestors)
464 return pycompat.maplist(self.node, ancestors)
464
465
465 def descendants(self, revs):
466 def descendants(self, revs):
466 # TODO we could implement this using a recursive SQL query, which
467 # TODO we could implement this using a recursive SQL query, which
467 # might be faster.
468 # might be faster.
468 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
469 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
469
470
470 def heads(self, start=None, stop=None):
471 def heads(self, start=None, stop=None):
471 if start is None and stop is None:
472 if start is None and stop is None:
472 if not len(self):
473 if not len(self):
473 return [nullid]
474 return [nullid]
474
475
475 startrev = self.rev(start) if start is not None else nullrev
476 startrev = self.rev(start) if start is not None else nullrev
476 stoprevs = {self.rev(n) for n in stop or []}
477 stoprevs = {self.rev(n) for n in stop or []}
477
478
478 revs = dagop.headrevssubset(self.revs, self.parentrevs,
479 revs = dagop.headrevssubset(self.revs, self.parentrevs,
479 startrev=startrev, stoprevs=stoprevs)
480 startrev=startrev, stoprevs=stoprevs)
480
481
481 return [self.node(rev) for rev in revs]
482 return [self.node(rev) for rev in revs]
482
483
483 def children(self, node):
484 def children(self, node):
484 rev = self.rev(node)
485 rev = self.rev(node)
485
486
486 res = self._db.execute(
487 res = self._db.execute(
487 r'SELECT'
488 r'SELECT'
488 r' node '
489 r' node '
489 r' FROM filedata '
490 r' FROM filedata '
490 r' WHERE path=? AND (p1rev=? OR p2rev=?) '
491 r' WHERE path=? AND (p1rev=? OR p2rev=?) '
491 r' ORDER BY revnum ASC',
492 r' ORDER BY revnum ASC',
492 (self._path, rev, rev))
493 (self._path, rev, rev))
493
494
494 return [row[0] for row in res]
495 return [row[0] for row in res]
495
496
496 # End of ifileindex interface.
497 # End of ifileindex interface.
497
498
498 # Start of ifiledata interface.
499 # Start of ifiledata interface.
499
500
500 def size(self, rev):
501 def size(self, rev):
501 if rev == nullrev:
502 if rev == nullrev:
502 return 0
503 return 0
503
504
504 if rev not in self._revtonode:
505 if rev not in self._revtonode:
505 raise IndexError(rev)
506 raise IndexError(rev)
506
507
507 node = self._revtonode[rev]
508 node = self._revtonode[rev]
508
509
509 if self.renamed(node):
510 if self.renamed(node):
510 return len(self.read(node))
511 return len(self.read(node))
511
512
512 return len(self.revision(node))
513 return len(self.revision(node))
513
514
514 def revision(self, node, raw=False, _verifyhash=True):
515 def revision(self, node, raw=False, _verifyhash=True):
515 if node in (nullid, nullrev):
516 if node in (nullid, nullrev):
516 return b''
517 return b''
517
518
518 if isinstance(node, int):
519 if isinstance(node, int):
519 node = self.node(node)
520 node = self.node(node)
520
521
521 if node not in self._nodetorev:
522 if node not in self._nodetorev:
522 raise error.LookupError(node, self._path, _('no node'))
523 raise error.LookupError(node, self._path, _('no node'))
523
524
524 if node in self._revisioncache:
525 if node in self._revisioncache:
525 return self._revisioncache[node]
526 return self._revisioncache[node]
526
527
527 # Because we have a fulltext revision cache, we are able to
528 # Because we have a fulltext revision cache, we are able to
528 # short-circuit delta chain traversal and decompression as soon as
529 # short-circuit delta chain traversal and decompression as soon as
529 # we encounter a revision in the cache.
530 # we encounter a revision in the cache.
530
531
531 stoprids = {self._revisions[n].rid: n
532 stoprids = {self._revisions[n].rid: n
532 for n in self._revisioncache}
533 for n in self._revisioncache}
533
534
534 if not stoprids:
535 if not stoprids:
535 stoprids[-1] = None
536 stoprids[-1] = None
536
537
537 fulltext = resolvedeltachain(self._db, self._pathid, node,
538 fulltext = resolvedeltachain(self._db, self._pathid, node,
538 self._revisioncache, stoprids,
539 self._revisioncache, stoprids,
539 zstddctx=self._dctx)
540 zstddctx=self._dctx)
540
541
541 # Don't verify hashes if parent nodes were rewritten, as the hash
542 # Don't verify hashes if parent nodes were rewritten, as the hash
542 # wouldn't verify.
543 # wouldn't verify.
543 if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
544 if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
544 _verifyhash = False
545 _verifyhash = False
545
546
546 if _verifyhash:
547 if _verifyhash:
547 self._checkhash(fulltext, node)
548 self._checkhash(fulltext, node)
548 self._revisioncache[node] = fulltext
549 self._revisioncache[node] = fulltext
549
550
550 return fulltext
551 return fulltext
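# Editor's note (illustrative): stoprids maps fileindex row ids of cached
# revisions to their nodes so the recursive query can stop early; the
# {-1: None} sentinel matches no row, forcing a walk to a fulltext base.
#
#   stoprids = {rid_a: node_a}    # fulltext of node_a is already cached
#   # resolvedeltachain() halts at rid_a and patches the remaining deltas
#   # onto revisioncache[node_a] instead of rebuilding from scratch.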
551
552
552 def rawdata(self, *args, **kwargs):
553 def rawdata(self, *args, **kwargs):
553 return self.revision(*args, **kwargs)
554 return self.revision(*args, **kwargs)
554
555
555 def read(self, node):
556 def read(self, node):
556 return storageutil.filtermetadata(self.revision(node))
557 return storageutil.filtermetadata(self.revision(node))
557
558
558 def renamed(self, node):
559 def renamed(self, node):
559 return storageutil.filerevisioncopied(self, node)
560 return storageutil.filerevisioncopied(self, node)
560
561
561 def cmp(self, node, fulltext):
562 def cmp(self, node, fulltext):
562 return not storageutil.filedataequivalent(self, node, fulltext)
563 return not storageutil.filedataequivalent(self, node, fulltext)
563
564
564 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
565 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
565 assumehaveparentrevisions=False,
566 assumehaveparentrevisions=False,
566 deltamode=repository.CG_DELTAMODE_STD):
567 deltamode=repository.CG_DELTAMODE_STD):
567 if nodesorder not in ('nodes', 'storage', 'linear', None):
568 if nodesorder not in ('nodes', 'storage', 'linear', None):
568 raise error.ProgrammingError('unhandled value for nodesorder: %s' %
569 raise error.ProgrammingError('unhandled value for nodesorder: %s' %
569 nodesorder)
570 nodesorder)
570
571
571 nodes = [n for n in nodes if n != nullid]
572 nodes = [n for n in nodes if n != nullid]
572
573
573 if not nodes:
574 if not nodes:
574 return
575 return
575
576
576 # TODO perform in a single query.
577 # TODO perform in a single query.
577 res = self._db.execute(
578 res = self._db.execute(
578 r'SELECT revnum, deltaid FROM fileindex '
579 r'SELECT revnum, deltaid FROM fileindex '
579 r'WHERE pathid=? '
580 r'WHERE pathid=? '
580 r' AND node in (%s)' % (r','.join([r'?'] * len(nodes))),
581 r' AND node in (%s)' % (r','.join([r'?'] * len(nodes))),
581 tuple([self._pathid] + nodes))
582 tuple([self._pathid] + nodes))
582
583
583 deltabases = {}
584 deltabases = {}
584
585
585 for rev, deltaid in res:
586 for rev, deltaid in res:
586 res = self._db.execute(
587 res = self._db.execute(
587 r'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
588 r'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
588 (self._pathid, deltaid))
589 (self._pathid, deltaid))
589 deltabases[rev] = res.fetchone()[0]
590 deltabases[rev] = res.fetchone()[0]
590
591
591 # TODO define revdifffn so we can use delta from storage.
592 # TODO define revdifffn so we can use delta from storage.
592 for delta in storageutil.emitrevisions(
593 for delta in storageutil.emitrevisions(
593 self, nodes, nodesorder, sqliterevisiondelta,
594 self, nodes, nodesorder, sqliterevisiondelta,
594 deltaparentfn=deltabases.__getitem__,
595 deltaparentfn=deltabases.__getitem__,
595 revisiondata=revisiondata,
596 revisiondata=revisiondata,
596 assumehaveparentrevisions=assumehaveparentrevisions,
597 assumehaveparentrevisions=assumehaveparentrevisions,
597 deltamode=deltamode):
598 deltamode=deltamode):
598
599
599 yield delta
600 yield delta
600
601
601 # End of ifiledata interface.
602 # End of ifiledata interface.
602
603
603 # Start of ifilemutation interface.
604 # Start of ifilemutation interface.
604
605
605 def add(self, filedata, meta, transaction, linkrev, p1, p2):
606 def add(self, filedata, meta, transaction, linkrev, p1, p2):
606 if meta or filedata.startswith(b'\x01\n'):
607 if meta or filedata.startswith(b'\x01\n'):
607 filedata = storageutil.packmeta(meta, filedata)
608 filedata = storageutil.packmeta(meta, filedata)
608
609
609 return self.addrevision(filedata, transaction, linkrev, p1, p2)
610 return self.addrevision(filedata, transaction, linkrev, p1, p2)
610
611
611 def addrevision(self, revisiondata, transaction, linkrev, p1, p2, node=None,
612 def addrevision(self, revisiondata, transaction, linkrev, p1, p2, node=None,
612 flags=0, cachedelta=None):
613 flags=0, cachedelta=None):
613 if flags:
614 if flags:
614 raise SQLiteStoreError(_('flags not supported on revisions'))
615 raise SQLiteStoreError(_('flags not supported on revisions'))
615
616
616 validatehash = node is not None
617 validatehash = node is not None
617 node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)
618 node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)
618
619
619 if validatehash:
620 if validatehash:
620 self._checkhash(revisiondata, node, p1, p2)
621 self._checkhash(revisiondata, node, p1, p2)
621
622
622 if node in self._nodetorev:
623 if node in self._nodetorev:
623 return node
624 return node
624
625
625 node = self._addrawrevision(node, revisiondata, transaction, linkrev,
626 node = self._addrawrevision(node, revisiondata, transaction, linkrev,
626 p1, p2)
627 p1, p2)
627
628
628 self._revisioncache[node] = revisiondata
629 self._revisioncache[node] = revisiondata
629 return node
630 return node
630
631
631 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
632 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
632 maybemissingparents=False):
633 maybemissingparents=False):
633 nodes = []
634 nodes = []
634
635
635 for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
636 for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
636 storeflags = 0
637 storeflags = 0
637
638
638 if wireflags & repository.REVISION_FLAG_CENSORED:
639 if wireflags & repository.REVISION_FLAG_CENSORED:
639 storeflags |= FLAG_CENSORED
640 storeflags |= FLAG_CENSORED
640
641
641 if wireflags & ~repository.REVISION_FLAG_CENSORED:
642 if wireflags & ~repository.REVISION_FLAG_CENSORED:
642 raise SQLiteStoreError('unhandled revision flag')
643 raise SQLiteStoreError('unhandled revision flag')
643
644
644 if maybemissingparents:
645 if maybemissingparents:
645 if p1 != nullid and not self.hasnode(p1):
646 if p1 != nullid and not self.hasnode(p1):
646 p1 = nullid
647 p1 = nullid
647 storeflags |= FLAG_MISSING_P1
648 storeflags |= FLAG_MISSING_P1
648
649
649 if p2 != nullid and not self.hasnode(p2):
650 if p2 != nullid and not self.hasnode(p2):
650 p2 = nullid
651 p2 = nullid
651 storeflags |= FLAG_MISSING_P2
652 storeflags |= FLAG_MISSING_P2
652
653
653 baserev = self.rev(deltabase)
654 baserev = self.rev(deltabase)
654
655
655 # If base is censored, delta must be full replacement in a single
656 # If base is censored, delta must be full replacement in a single
656 # patch operation.
657 # patch operation.
657 if baserev != nullrev and self.iscensored(baserev):
658 if baserev != nullrev and self.iscensored(baserev):
658 hlen = struct.calcsize('>lll')
659 hlen = struct.calcsize('>lll')
659 oldlen = len(self.revision(deltabase, raw=True,
660 oldlen = len(self.revision(deltabase, raw=True,
660 _verifyhash=False))
661 _verifyhash=False))
661 newlen = len(delta) - hlen
662 newlen = len(delta) - hlen
662
663
663 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
664 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
664 raise error.CensoredBaseError(self._path,
665 raise error.CensoredBaseError(self._path,
665 deltabase)
666 deltabase)
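# (Editor's note: mdiff.replacediffheader(oldlen, newlen) is, to the best of
# my knowledge, struct.pack('>lll', 0, oldlen, newlen) -- a single hunk that
# replaces bytes 0..oldlen with newlen new bytes, i.e. a full replacement.)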
666
667
667 if (not (storeflags & FLAG_CENSORED)
668 if (not (storeflags & FLAG_CENSORED)
668 and storageutil.deltaiscensored(
669 and storageutil.deltaiscensored(
669 delta, baserev, lambda x: len(self.revision(x, raw=True)))):
670 delta, baserev, lambda x: len(self.revision(x, raw=True)))):
670 storeflags |= FLAG_CENSORED
671 storeflags |= FLAG_CENSORED
671
672
672 linkrev = linkmapper(linknode)
673 linkrev = linkmapper(linknode)
673
674
674 nodes.append(node)
675 nodes.append(node)
675
676
676 if node in self._revisions:
677 if node in self._revisions:
677 # Possibly reset parents to make them proper.
678 # Possibly reset parents to make them proper.
678 entry = self._revisions[node]
679 entry = self._revisions[node]
679
680
680 if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
681 if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
681 entry.p1node = p1
682 entry.p1node = p1
682 entry.p1rev = self._nodetorev[p1]
683 entry.p1rev = self._nodetorev[p1]
683 entry.flags &= ~FLAG_MISSING_P1
684 entry.flags &= ~FLAG_MISSING_P1
684
685
685 self._db.execute(
686 self._db.execute(
686 r'UPDATE fileindex SET p1rev=?, flags=? '
687 r'UPDATE fileindex SET p1rev=?, flags=? '
687 r'WHERE id=?',
688 r'WHERE id=?',
688 (self._nodetorev[p1], entry.flags, entry.rid))
689 (self._nodetorev[p1], entry.flags, entry.rid))
689
690
690 if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
691 if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
691 entry.p2node = p2
692 entry.p2node = p2
692 entry.p2rev = self._nodetorev[p2]
693 entry.p2rev = self._nodetorev[p2]
693 entry.flags &= ~FLAG_MISSING_P2
694 entry.flags &= ~FLAG_MISSING_P2
694
695
695 self._db.execute(
696 self._db.execute(
696 r'UPDATE fileindex SET p2rev=?, flags=? '
697 r'UPDATE fileindex SET p2rev=?, flags=? '
697 r'WHERE id=?',
698 r'WHERE id=?',
698 (self._nodetorev[p2], entry.flags, entry.rid))
699 (self._nodetorev[p2], entry.flags, entry.rid))
699
700
700 continue
701 continue
701
702
702 if deltabase == nullid:
703 if deltabase == nullid:
703 text = mdiff.patch(b'', delta)
704 text = mdiff.patch(b'', delta)
704 storedelta = None
705 storedelta = None
705 else:
706 else:
706 text = None
707 text = None
707 storedelta = (deltabase, delta)
708 storedelta = (deltabase, delta)
708
709
709 self._addrawrevision(node, text, transaction, linkrev, p1, p2,
710 self._addrawrevision(node, text, transaction, linkrev, p1, p2,
710 storedelta=storedelta, flags=storeflags)
711 storedelta=storedelta, flags=storeflags)
711
712
712 if addrevisioncb:
713 if addrevisioncb:
713 addrevisioncb(self, node)
714 addrevisioncb(self, node)
714
715
715 return nodes
716 return nodes
716
717
717 def censorrevision(self, tr, censornode, tombstone=b''):
718 def censorrevision(self, tr, censornode, tombstone=b''):
718 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
719 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
719
720
720 # This restriction is cargo culted from revlogs and makes no sense for
721 # This restriction is cargo culted from revlogs and makes no sense for
721 # SQLite, since columns can be resized at will.
722 # SQLite, since columns can be resized at will.
722 if len(tombstone) > len(self.revision(censornode, raw=True)):
723 if len(tombstone) > len(self.revision(censornode, raw=True)):
723 raise error.Abort(_('censor tombstone must be no longer than '
724 raise error.Abort(_('censor tombstone must be no longer than '
724 'censored data'))
725 'censored data'))
725
726
726 # We need to replace the censored revision's data with the tombstone.
727 # We need to replace the censored revision's data with the tombstone.
727 # But replacing that data will have implications for delta chains that
728 # But replacing that data will have implications for delta chains that
728 # reference it.
729 # reference it.
729 #
730 #
730 # While "better," more complex strategies are possible, we do something
731 # While "better," more complex strategies are possible, we do something
731 # simple: we find delta chain children of the censored revision and we
732 # simple: we find delta chain children of the censored revision and we
732 # replace those incremental deltas with fulltexts of their corresponding
733 # replace those incremental deltas with fulltexts of their corresponding
733 # revision. Then we delete the now-unreferenced delta and original
734 # revision. Then we delete the now-unreferenced delta and original
734 # revision and insert a replacement.
735 # revision and insert a replacement.
735
736
736 # Find the delta to be censored.
737 # Find the delta to be censored.
737 censoreddeltaid = self._db.execute(
738 censoreddeltaid = self._db.execute(
738 r'SELECT deltaid FROM fileindex WHERE id=?',
739 r'SELECT deltaid FROM fileindex WHERE id=?',
739 (self._revisions[censornode].rid,)).fetchone()[0]
740 (self._revisions[censornode].rid,)).fetchone()[0]
740
741
741 # Find all its delta chain children.
742 # Find all its delta chain children.
742 # TODO once we support storing deltas for !files, we'll need to look
743 # TODO once we support storing deltas for !files, we'll need to look
743 # for those delta chains too.
744 # for those delta chains too.
744 rows = list(self._db.execute(
745 rows = list(self._db.execute(
745 r'SELECT id, pathid, node FROM fileindex '
746 r'SELECT id, pathid, node FROM fileindex '
746 r'WHERE deltabaseid=? OR deltaid=?',
747 r'WHERE deltabaseid=? OR deltaid=?',
747 (censoreddeltaid, censoreddeltaid)))
748 (censoreddeltaid, censoreddeltaid)))
748
749
749 for row in rows:
750 for row in rows:
750 rid, pathid, node = row
751 rid, pathid, node = row
751
752
752 fulltext = resolvedeltachain(self._db, pathid, node, {}, {-1: None},
753 fulltext = resolvedeltachain(self._db, pathid, node, {}, {-1: None},
753 zstddctx=self._dctx)
754 zstddctx=self._dctx)
754
755
755 deltahash = hashlib.sha1(fulltext).digest()
756 deltahash = hashlib.sha1(fulltext).digest()
756
757
757 if self._compengine == 'zstd':
758 if self._compengine == 'zstd':
758 deltablob = self._cctx.compress(fulltext)
759 deltablob = self._cctx.compress(fulltext)
759 compression = COMPRESSION_ZSTD
760 compression = COMPRESSION_ZSTD
760 elif self._compengine == 'zlib':
761 elif self._compengine == 'zlib':
761 deltablob = zlib.compress(fulltext)
762 deltablob = zlib.compress(fulltext)
762 compression = COMPRESSION_ZLIB
763 compression = COMPRESSION_ZLIB
763 elif self._compengine == 'none':
764 elif self._compengine == 'none':
764 deltablob = fulltext
765 deltablob = fulltext
765 compression = COMPRESSION_NONE
766 compression = COMPRESSION_NONE
766 else:
767 else:
767 raise error.ProgrammingError('unhandled compression engine: %s'
768 raise error.ProgrammingError('unhandled compression engine: %s'
768 % self._compengine)
769 % self._compengine)
769
770
770 if len(deltablob) >= len(fulltext):
771 if len(deltablob) >= len(fulltext):
771 deltablob = fulltext
772 deltablob = fulltext
772 compression = COMPRESSION_NONE
773 compression = COMPRESSION_NONE
773
774
774 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
775 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
775
776
776 self._db.execute(
777 self._db.execute(
777 r'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
778 r'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
778 r'WHERE id=?', (deltaid, rid))
779 r'WHERE id=?', (deltaid, rid))
779
780
780 # Now create the tombstone delta and replace the delta on the censored
781 # Now create the tombstone delta and replace the delta on the censored
781 # node.
782 # node.
782 deltahash = hashlib.sha1(tombstone).digest()
783 deltahash = hashlib.sha1(tombstone).digest()
783 tombstonedeltaid = insertdelta(self._db, COMPRESSION_NONE,
784 tombstonedeltaid = insertdelta(self._db, COMPRESSION_NONE,
784 deltahash, tombstone)
785 deltahash, tombstone)
785
786
786 flags = self._revisions[censornode].flags
787 flags = self._revisions[censornode].flags
787 flags |= FLAG_CENSORED
788 flags |= FLAG_CENSORED
788
789
789 self._db.execute(
790 self._db.execute(
790 r'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
791 r'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
791 r'WHERE pathid=? AND node=?',
792 r'WHERE pathid=? AND node=?',
792 (flags, tombstonedeltaid, self._pathid, censornode))
793 (flags, tombstonedeltaid, self._pathid, censornode))
793
794
794 self._db.execute(
795 self._db.execute(
795 r'DELETE FROM delta WHERE id=?', (censoreddeltaid,))
796 r'DELETE FROM delta WHERE id=?', (censoreddeltaid,))
796
797
797 self._refreshindex()
798 self._refreshindex()
798 self._revisioncache.clear()
799 self._revisioncache.clear()
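# Editor's sketch of the strategy above, for a delta chain A <- B <- C where
# B is censored:
#
#   1. C's incremental delta (based on B) is rewritten as a fulltext delta
#      with deltabaseid=NULL.
#   2. B's row is pointed at a new tombstone delta and flagged CENSORED.
#   3. B's original delta, now unreferenced, is deleted.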
799
800
800 def getstrippoint(self, minlink):
801 def getstrippoint(self, minlink):
801 return storageutil.resolvestripinfo(minlink, len(self) - 1,
802 return storageutil.resolvestripinfo(minlink, len(self) - 1,
802 [self.rev(n) for n in self.heads()],
803 [self.rev(n) for n in self.heads()],
803 self.linkrev,
804 self.linkrev,
804 self.parentrevs)
805 self.parentrevs)
805
806
806 def strip(self, minlink, transaction):
807 def strip(self, minlink, transaction):
807 if not len(self):
808 if not len(self):
808 return
809 return
809
810
810 rev, _ignored = self.getstrippoint(minlink)
811 rev, _ignored = self.getstrippoint(minlink)
811
812
812 if rev == len(self):
813 if rev == len(self):
813 return
814 return
814
815
815 for rev in self.revs(rev):
816 for rev in self.revs(rev):
816 self._db.execute(
817 self._db.execute(
817 r'DELETE FROM fileindex WHERE pathid=? AND node=?',
818 r'DELETE FROM fileindex WHERE pathid=? AND node=?',
818 (self._pathid, self.node(rev)))
819 (self._pathid, self.node(rev)))
819
820
820 # TODO how should we garbage collect data in delta table?
821 # TODO how should we garbage collect data in delta table?
821
822
822 self._refreshindex()
823 self._refreshindex()
823
824
824 # End of ifilemutation interface.
825 # End of ifilemutation interface.
825
826
826 # Start of ifilestorage interface.
827 # Start of ifilestorage interface.
827
828
828 def files(self):
829 def files(self):
829 return []
830 return []
830
831
831 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
832 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
832 revisionscount=False, trackedsize=False,
833 revisionscount=False, trackedsize=False,
833 storedsize=False):
834 storedsize=False):
834 d = {}
835 d = {}
835
836
836 if exclusivefiles:
837 if exclusivefiles:
837 d['exclusivefiles'] = []
838 d['exclusivefiles'] = []
838
839
839 if sharedfiles:
840 if sharedfiles:
840 # TODO list sqlite file(s) here.
841 # TODO list sqlite file(s) here.
841 d['sharedfiles'] = []
842 d['sharedfiles'] = []
842
843
843 if revisionscount:
844 if revisionscount:
844 d['revisionscount'] = len(self)
845 d['revisionscount'] = len(self)
845
846
846 if trackedsize:
847 if trackedsize:
847 d['trackedsize'] = sum(len(self.revision(node))
848 d['trackedsize'] = sum(len(self.revision(node))
848 for node in self._nodetorev)
849 for node in self._nodetorev)
849
850
850 if storedsize:
851 if storedsize:
851 # TODO implement this?
852 # TODO implement this?
852 d['storedsize'] = None
853 d['storedsize'] = None
853
854
854 return d
855 return d
855
856
856 def verifyintegrity(self, state):
857 def verifyintegrity(self, state):
857 state['skipread'] = set()
858 state['skipread'] = set()
858
859
859 for rev in self:
860 for rev in self:
860 node = self.node(rev)
861 node = self.node(rev)
861
862
862 try:
863 try:
863 self.revision(node)
864 self.revision(node)
864 except Exception as e:
865 except Exception as e:
865 yield sqliteproblem(
866 yield sqliteproblem(
866 error=_('unpacking %s: %s') % (short(node), e),
867 error=_('unpacking %s: %s') % (short(node), e),
867 node=node)
868 node=node)
868
869
869 state['skipread'].add(node)
870 state['skipread'].add(node)
870
871
871 # End of ifilestorage interface.
872 # End of ifilestorage interface.
872
873
873 def _checkhash(self, fulltext, node, p1=None, p2=None):
874 def _checkhash(self, fulltext, node, p1=None, p2=None):
874 if p1 is None and p2 is None:
875 if p1 is None and p2 is None:
875 p1, p2 = self.parents(node)
876 p1, p2 = self.parents(node)
876
877
877 if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
878 if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
878 return
879 return
879
880
880 try:
881 try:
881 del self._revisioncache[node]
882 del self._revisioncache[node]
882 except KeyError:
883 except KeyError:
883 pass
884 pass
884
885
885 if storageutil.iscensoredtext(fulltext):
886 if storageutil.iscensoredtext(fulltext):
886 raise error.CensoredNodeError(self._path, node, fulltext)
887 raise error.CensoredNodeError(self._path, node, fulltext)
887
888
888 raise SQLiteStoreError(_('integrity check failed on %s') %
889 raise SQLiteStoreError(_('integrity check failed on %s') %
889 self._path)
890 self._path)
890
891
891 def _addrawrevision(self, node, revisiondata, transaction, linkrev,
892 def _addrawrevision(self, node, revisiondata, transaction, linkrev,
892 p1, p2, storedelta=None, flags=0):
893 p1, p2, storedelta=None, flags=0):
893 if self._pathid is None:
894 if self._pathid is None:
894 res = self._db.execute(
895 res = self._db.execute(
895 r'INSERT INTO filepath (path) VALUES (?)', (self._path,))
896 r'INSERT INTO filepath (path) VALUES (?)', (self._path,))
896 self._pathid = res.lastrowid
897 self._pathid = res.lastrowid
897
898
898 # For simplicity, always store a delta against p1.
899 # For simplicity, always store a delta against p1.
899 # TODO we need a lot more logic here to make behavior reasonable.
900 # TODO we need a lot more logic here to make behavior reasonable.
900
901
901 if storedelta:
902 if storedelta:
902 deltabase, delta = storedelta
903 deltabase, delta = storedelta
903
904
904 if isinstance(deltabase, int):
905 if isinstance(deltabase, int):
905 deltabase = self.node(deltabase)
906 deltabase = self.node(deltabase)
906
907
907 else:
908 else:
908 assert revisiondata is not None
909 assert revisiondata is not None
909 deltabase = p1
910 deltabase = p1
910
911
911 if deltabase == nullid:
912 if deltabase == nullid:
912 delta = revisiondata
913 delta = revisiondata
913 else:
914 else:
914 delta = mdiff.textdiff(self.revision(self.rev(deltabase)),
915 delta = mdiff.textdiff(self.revision(self.rev(deltabase)),
915 revisiondata)
916 revisiondata)
916
917
917 # File index stores a pointer to its delta and the parent delta.
918 # File index stores a pointer to its delta and the parent delta.
918 # The parent delta is stored via a pointer to the fileindex PK.
919 # The parent delta is stored via a pointer to the fileindex PK.
919 if deltabase == nullid:
920 if deltabase == nullid:
920 baseid = None
921 baseid = None
921 else:
922 else:
922 baseid = self._revisions[deltabase].rid
923 baseid = self._revisions[deltabase].rid
923
924
924 # Deltas are stored keyed by a hash of their content, which allows
925 # Deltas are stored keyed by a hash of their content, which allows
925 # us to de-duplicate. It is faster to insert unconditionally and
926 # us to de-duplicate. It is faster to insert unconditionally and
926 # resolve the occasional conflict (see insertdelta()) than to check
927 # resolve the occasional conflict (see insertdelta()) than to check
927 # for an existing row first.
928 # for an existing row first.
928 deltahash = hashlib.sha1(delta).digest()
929 deltahash = hashlib.sha1(delta).digest()
929
930
930 if self._compengine == 'zstd':
931 if self._compengine == 'zstd':
931 deltablob = self._cctx.compress(delta)
932 deltablob = self._cctx.compress(delta)
932 compression = COMPRESSION_ZSTD
933 compression = COMPRESSION_ZSTD
933 elif self._compengine == 'zlib':
934 elif self._compengine == 'zlib':
934 deltablob = zlib.compress(delta)
935 deltablob = zlib.compress(delta)
935 compression = COMPRESSION_ZLIB
936 compression = COMPRESSION_ZLIB
936 elif self._compengine == 'none':
937 elif self._compengine == 'none':
937 deltablob = delta
938 deltablob = delta
938 compression = COMPRESSION_NONE
939 compression = COMPRESSION_NONE
939 else:
940 else:
940 raise error.ProgrammingError('unhandled compression engine: %s' %
941 raise error.ProgrammingError('unhandled compression engine: %s' %
941 self._compengine)
942 self._compengine)
942
943
943 # Don't store compressed data if it isn't practical.
944 # Don't store compressed data if it isn't practical.
944 if len(deltablob) >= len(delta):
945 if len(deltablob) >= len(delta):
945 deltablob = delta
946 deltablob = delta
946 compression = COMPRESSION_NONE
947 compression = COMPRESSION_NONE
947
948
948 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
949 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
949
950
950 rev = len(self)
951 rev = len(self)
951
952
952 if p1 == nullid:
953 if p1 == nullid:
953 p1rev = nullrev
954 p1rev = nullrev
954 else:
955 else:
955 p1rev = self._nodetorev[p1]
956 p1rev = self._nodetorev[p1]
956
957
957 if p2 == nullid:
958 if p2 == nullid:
958 p2rev = nullrev
959 p2rev = nullrev
959 else:
960 else:
960 p2rev = self._nodetorev[p2]
961 p2rev = self._nodetorev[p2]
961
962
962 rid = self._db.execute(
963 rid = self._db.execute(
963 r'INSERT INTO fileindex ('
964 r'INSERT INTO fileindex ('
964 r' pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
965 r' pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
965 r' deltaid, deltabaseid) '
966 r' deltaid, deltabaseid) '
966 r' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
967 r' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
967 (self._pathid, rev, node, p1rev, p2rev, linkrev, flags,
968 (self._pathid, rev, node, p1rev, p2rev, linkrev, flags,
968 deltaid, baseid)
969 deltaid, baseid)
969 ).lastrowid
970 ).lastrowid
970
971
971 entry = revisionentry(
972 entry = revisionentry(
972 rid=rid,
973 rid=rid,
973 rev=rev,
974 rev=rev,
974 node=node,
975 node=node,
975 p1rev=p1rev,
976 p1rev=p1rev,
976 p2rev=p2rev,
977 p2rev=p2rev,
977 p1node=p1,
978 p1node=p1,
978 p2node=p2,
979 p2node=p2,
979 linkrev=linkrev,
980 linkrev=linkrev,
980 flags=flags)
981 flags=flags)
981
982
982 self._nodetorev[node] = rev
983 self._nodetorev[node] = rev
983 self._revtonode[rev] = node
984 self._revtonode[rev] = node
984 self._revisions[node] = entry
985 self._revisions[node] = entry
985
986
986 return node
987 return node
987
988
988 class sqliterepository(localrepo.localrepository):
989 class sqliterepository(localrepo.localrepository):
989 def cancopy(self):
990 def cancopy(self):
990 return False
991 return False
991
992
992 def transaction(self, *args, **kwargs):
993 def transaction(self, *args, **kwargs):
993 current = self.currenttransaction()
994 current = self.currenttransaction()
994
995
995 tr = super(sqliterepository, self).transaction(*args, **kwargs)
996 tr = super(sqliterepository, self).transaction(*args, **kwargs)
996
997
997 if current:
998 if current:
998 return tr
999 return tr
999
1000
1000 self._dbconn.execute(r'BEGIN TRANSACTION')
1001 self._dbconn.execute(r'BEGIN TRANSACTION')
1001
1002
1002 def committransaction(_):
1003 def committransaction(_):
1003 self._dbconn.commit()
1004 self._dbconn.commit()
1004
1005
1005 tr.addfinalize('sqlitestore', committransaction)
1006 tr.addfinalize('sqlitestore', committransaction)
1006
1007
1007 return tr
1008 return tr
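# Editor's note (illustrative): each top-level Mercurial transaction is
# mirrored by exactly one SQLite transaction; nested calls reuse it.
#
#   with repo.transaction(b'example') as tr:
#       ...                     # writes accumulate after BEGIN TRANSACTION
#   # finalizers run on close -> committransaction() -> _dbconn.commit()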
1008
1009
1009 @property
1010 @property
1010 def _dbconn(self):
1011 def _dbconn(self):
1011 # SQLite connections can only be used on the thread that created
1012 # SQLite connections can only be used on the thread that created
1012 # them. In most cases, this "just works." However, hgweb uses
1013 # them. In most cases, this "just works." However, hgweb uses
1013 # multiple threads.
1014 # multiple threads.
1014 tid = threading.current_thread().ident
1015 tid = threading.current_thread().ident
1015
1016
1016 if self._db:
1017 if self._db:
1017 if self._db[0] == tid:
1018 if self._db[0] == tid:
1018 return self._db[1]
1019 return self._db[1]
1019
1020
1020 db = makedb(self.svfs.join('db.sqlite'))
1021 db = makedb(self.svfs.join('db.sqlite'))
1021 self._db = (tid, db)
1022 self._db = (tid, db)
1022
1023
1023 return db
1024 return db
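# Editor's note: only the most recent (thread id, connection) pair is cached,
# so a thread that does not own the cached handle opens a fresh connection:
#
#   thread A: _dbconn -> opens connection #1, caches (tid_a, db1)
#   thread B: _dbconn -> tid mismatch, opens connection #2, caches (tid_b, db2)
#
# sqlite3 connections are bound to their creating thread, so this trades
# connection reuse for correctness under hgweb's thread pool.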
1024
1025
1025 def makedb(path):
1026 def makedb(path):
1026 """Construct a database handle for a database at path."""
1027 """Construct a database handle for a database at path."""
1027
1028
1028 db = sqlite3.connect(encoding.strfromlocal(path))
1029 db = sqlite3.connect(encoding.strfromlocal(path))
1029 db.text_factory = bytes
1030 db.text_factory = bytes
1030
1031
1031 res = db.execute(r'PRAGMA user_version').fetchone()[0]
1032 res = db.execute(r'PRAGMA user_version').fetchone()[0]
1032
1033
1033 # New database.
1034 # New database.
1034 if res == 0:
1035 if res == 0:
1035 for statement in CREATE_SCHEMA:
1036 for statement in CREATE_SCHEMA:
1036 db.execute(statement)
1037 db.execute(statement)
1037
1038
1038 db.commit()
1039 db.commit()
1039
1040
1040 elif res == CURRENT_SCHEMA_VERSION:
1041 elif res == CURRENT_SCHEMA_VERSION:
1041 pass
1042 pass
1042
1043
1043 else:
1044 else:
1044 raise error.Abort(_('sqlite database has unrecognized version'))
1045 raise error.Abort(_('sqlite database has unrecognized version'))
1045
1046
1046 db.execute(r'PRAGMA journal_mode=WAL')
1047 db.execute(r'PRAGMA journal_mode=WAL')
1047
1048
1048 return db
1049 return db
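# Example (editor's summary): PRAGMA user_version drives schema management:
#
#   0                      -> new database; execute CREATE_SCHEMA
#   CURRENT_SCHEMA_VERSION -> up to date; nothing to do
#   anything else          -> abort (no migration path is implemented)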
1049
1050
1050 def featuresetup(ui, supported):
1051 def featuresetup(ui, supported):
1051 supported.add(REQUIREMENT)
1052 supported.add(REQUIREMENT)
1052
1053
1053 if zstd:
1054 if zstd:
1054 supported.add(REQUIREMENT_ZSTD)
1055 supported.add(REQUIREMENT_ZSTD)
1055
1056
1056 supported.add(REQUIREMENT_ZLIB)
1057 supported.add(REQUIREMENT_ZLIB)
1057 supported.add(REQUIREMENT_NONE)
1058 supported.add(REQUIREMENT_NONE)
1058 supported.add(REQUIREMENT_SHALLOW_FILES)
1059 supported.add(REQUIREMENT_SHALLOW_FILES)
1059 supported.add(repository.NARROW_REQUIREMENT)
1060 supported.add(repository.NARROW_REQUIREMENT)
1060
1061
1061 def newreporequirements(orig, ui, createopts):
1062 def newreporequirements(orig, ui, createopts):
1062 if createopts['backend'] != 'sqlite':
1063 if createopts['backend'] != 'sqlite':
1063 return orig(ui, createopts)
1064 return orig(ui, createopts)
1064
1065
1065 # This restriction can be lifted once we have more confidence.
1066 # This restriction can be lifted once we have more confidence.
1066 if 'sharedrepo' in createopts:
1067 if 'sharedrepo' in createopts:
1067 raise error.Abort(_('shared repositories not supported with SQLite '
1068 raise error.Abort(_('shared repositories not supported with SQLite '
1068 'store'))
1069 'store'))
1069
1070
1070 # This filtering is out of an abundance of caution: we want to ensure
1071 # This filtering is out of an abundance of caution: we want to ensure
1071 # we honor creation options and we do that by annotating exactly the
1072 # we honor creation options and we do that by annotating exactly the
1072 # creation options we recognize.
1073 # creation options we recognize.
1073 known = {
1074 known = {
1074 'narrowfiles',
1075 'narrowfiles',
1075 'backend',
1076 'backend',
1076 'shallowfilestore',
1077 'shallowfilestore',
1077 }
1078 }
1078
1079
1079 unsupported = set(createopts) - known
1080 unsupported = set(createopts) - known
1080 if unsupported:
1081 if unsupported:
1081 raise error.Abort(_('SQLite store does not support repo creation '
1082 raise error.Abort(_('SQLite store does not support repo creation '
1082 'option: %s') % ', '.join(sorted(unsupported)))
1083 'option: %s') % ', '.join(sorted(unsupported)))
1083
1084
1084 # Since we're a hybrid store that still relies on revlogs, we fall back
1085 # Since we're a hybrid store that still relies on revlogs, we fall back
1085 # to using the revlogv1 backend's storage requirements then adding our
1086 # to using the revlogv1 backend's storage requirements then adding our
1086 # own requirement.
1087 # own requirement.
1087 createopts['backend'] = 'revlogv1'
1088 createopts['backend'] = 'revlogv1'
1088 requirements = orig(ui, createopts)
1089 requirements = orig(ui, createopts)
1089 requirements.add(REQUIREMENT)
1090 requirements.add(REQUIREMENT)
1090
1091
1091 compression = ui.config('storage', 'sqlite.compression')
1092 compression = ui.config('storage', 'sqlite.compression')
1092
1093
1093 if compression == 'zstd' and not zstd:
1094 if compression == 'zstd' and not zstd:
1094 raise error.Abort(_('storage.sqlite.compression set to "zstd" but '
1095 raise error.Abort(_('storage.sqlite.compression set to "zstd" but '
1095 'zstandard compression not available to this '
1096 'zstandard compression not available to this '
1096 'Mercurial install'))
1097 'Mercurial install'))
1097
1098
1098 if compression == 'zstd':
1099 if compression == 'zstd':
1099 requirements.add(REQUIREMENT_ZSTD)
1100 requirements.add(REQUIREMENT_ZSTD)
1100 elif compression == 'zlib':
1101 elif compression == 'zlib':
1101 requirements.add(REQUIREMENT_ZLIB)
1102 requirements.add(REQUIREMENT_ZLIB)
1102 elif compression == 'none':
1103 elif compression == 'none':
1103 requirements.add(REQUIREMENT_NONE)
1104 requirements.add(REQUIREMENT_NONE)
1104 else:
1105 else:
1105 raise error.Abort(_('unknown compression engine defined in '
1106 raise error.Abort(_('unknown compression engine defined in '
1106 'storage.sqlite.compression: %s') % compression)
1107 'storage.sqlite.compression: %s') % compression)
1107
1108
1108 if createopts.get('shallowfilestore'):
1109 if createopts.get('shallowfilestore'):
1109 requirements.add(REQUIREMENT_SHALLOW_FILES)
1110 requirements.add(REQUIREMENT_SHALLOW_FILES)
1110
1111
1111 return requirements
1112 return requirements
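# Example configuration (standard hgrc syntax; these are the three engines
# handled above):
#
#   [storage]
#   sqlite.compression = zstd    # or "zlib" / "none"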
1112
1113
1113 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1114 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1114 class sqlitefilestorage(object):
1115 class sqlitefilestorage(object):
1115 """Repository file storage backed by SQLite."""
1116 """Repository file storage backed by SQLite."""
1116 def file(self, path):
1117 def file(self, path):
1117 if path[0] == b'/':
1118 if path[0] == b'/':
1118 path = path[1:]
1119 path = path[1:]
1119
1120
1120 if REQUIREMENT_ZSTD in self.requirements:
1121 if REQUIREMENT_ZSTD in self.requirements:
1121 compression = 'zstd'
1122 compression = 'zstd'
1122 elif REQUIREMENT_ZLIB in self.requirements:
1123 elif REQUIREMENT_ZLIB in self.requirements:
1123 compression = 'zlib'
1124 compression = 'zlib'
1124 elif REQUIREMENT_NONE in self.requirements:
1125 elif REQUIREMENT_NONE in self.requirements:
1125 compression = 'none'
1126 compression = 'none'
1126 else:
1127 else:
1127 raise error.Abort(_('unable to determine what compression engine '
1128 raise error.Abort(_('unable to determine what compression engine '
1128 'to use for SQLite storage'))
1129 'to use for SQLite storage'))
1129
1130
1130 return sqlitefilestore(self._dbconn, path, compression)
1131 return sqlitefilestore(self._dbconn, path, compression)
1131
1132
1132 def makefilestorage(orig, requirements, features, **kwargs):
1133 def makefilestorage(orig, requirements, features, **kwargs):
1133 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1134 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1134 if REQUIREMENT in requirements:
1135 if REQUIREMENT in requirements:
1135 if REQUIREMENT_SHALLOW_FILES in requirements:
1136 if REQUIREMENT_SHALLOW_FILES in requirements:
1136 features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE)
1137 features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE)
1137
1138
1138 return sqlitefilestorage
1139 return sqlitefilestorage
1139 else:
1140 else:
1140 return orig(requirements=requirements, features=features, **kwargs)
1141 return orig(requirements=requirements, features=features, **kwargs)
1141
1142
1142 def makemain(orig, ui, requirements, **kwargs):
1143 def makemain(orig, ui, requirements, **kwargs):
1143 if REQUIREMENT in requirements:
1144 if REQUIREMENT in requirements:
1144 if REQUIREMENT_ZSTD in requirements and not zstd:
1145 if REQUIREMENT_ZSTD in requirements and not zstd:
1145 raise error.Abort(_('repository uses zstandard compression, which '
1146 raise error.Abort(_('repository uses zstandard compression, which '
1146 'is not available to this Mercurial install'))
1147 'is not available to this Mercurial install'))
1147
1148
1148 return sqliterepository
1149 return sqliterepository
1149
1150
1150 return orig(requirements=requirements, **kwargs)
1151 return orig(requirements=requirements, **kwargs)
1151
1152
1152 def verifierinit(orig, self, *args, **kwargs):
1153 def verifierinit(orig, self, *args, **kwargs):
1153 orig(self, *args, **kwargs)
1154 orig(self, *args, **kwargs)
1154
1155
1155 # We don't care that files in the store don't align with what is
1156 # We don't care that files in the store don't align with what is
1156 # advertised. So suppress these warnings.
1157 # advertised. So suppress these warnings.
1157 self.warnorphanstorefiles = False
1158 self.warnorphanstorefiles = False
1158
1159
1159 def extsetup(ui):
1160 def extsetup(ui):
1160 localrepo.featuresetupfuncs.add(featuresetup)
1161 localrepo.featuresetupfuncs.add(featuresetup)
1161 extensions.wrapfunction(localrepo, 'newreporequirements',
1162 extensions.wrapfunction(localrepo, 'newreporequirements',
1162 newreporequirements)
1163 newreporequirements)
1163 extensions.wrapfunction(localrepo, 'makefilestorage',
1164 extensions.wrapfunction(localrepo, 'makefilestorage',
1164 makefilestorage)
1165 makefilestorage)
1165 extensions.wrapfunction(localrepo, 'makemain',
1166 extensions.wrapfunction(localrepo, 'makemain',
1166 makemain)
1167 makemain)
1167 extensions.wrapfunction(verify.verifier, '__init__',
1168 extensions.wrapfunction(verify.verifier, '__init__',
1168 verifierinit)
1169 verifierinit)
1169
1170
1170 def reposetup(ui, repo):
1171 def reposetup(ui, repo):
1171 if isinstance(repo, sqliterepository):
1172 if isinstance(repo, sqliterepository):
1172 repo._db = None
1173 repo._db = None
1173
1174
1174 # TODO check for bundlerepository?
1175 # TODO check for bundlerepository?
@@ -1,1505 +1,1521 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11 import re
11 import re
12
12
13 from . import (
13 from . import (
14 encoding,
14 encoding,
15 error,
15 error,
16 )
16 )
17
17
18 def loadconfigtable(ui, extname, configtable):
18 def loadconfigtable(ui, extname, configtable):
19 """update config item known to the ui with the extension ones"""
19 """update config item known to the ui with the extension ones"""
20 for section, items in sorted(configtable.items()):
20 for section, items in sorted(configtable.items()):
21 knownitems = ui._knownconfig.setdefault(section, itemregister())
21 knownitems = ui._knownconfig.setdefault(section, itemregister())
22 knownkeys = set(knownitems)
22 knownkeys = set(knownitems)
23 newkeys = set(items)
23 newkeys = set(items)
24 for key in sorted(knownkeys & newkeys):
24 for key in sorted(knownkeys & newkeys):
25 msg = "extension '%s' overwrite config item '%s.%s'"
25 msg = "extension '%s' overwrite config item '%s.%s'"
26 msg %= (extname, section, key)
26 msg %= (extname, section, key)
27 ui.develwarn(msg, config='warn-config')
27 ui.develwarn(msg, config='warn-config')
28
28
29 knownitems.update(items)
29 knownitems.update(items)
30
30
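As a usage sketch of the loader above — the fakeui shim and the 'myext' table are hypothetical stand-ins, not Mercurial API — the flow looks roughly like this:

items = itemregister()
items['enabled'] = configitem('myext', 'enabled', default=False)
configtable = {'myext': items}

class fakeui(object):
    _knownconfig = {}                     # the only ui attribute touched here
    def develwarn(self, msg, config=None):
        print('devel-warn: %s' % msg)

loadconfigtable(fakeui(), 'myext', configtable)
# fakeui._knownconfig['myext']['enabled'] is now the configitem above;
# declaring 'enabled' a second time would emit the overwrite warning.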
31 class configitem(object):
31 class configitem(object):
32 """represent a known config item
32 """represent a known config item
33
33
34 :section: the official config section where to find this item,
34 :section: the official config section where to find this item,
35 :name: the official name within the section,
35 :name: the official name within the section,
36 :default: default value for this item,
36 :default: default value for this item,
37 :alias: optional list of tuples as alternatives,
37 :alias: optional list of tuples as alternatives,
38 :generic: this is a generic definition, matched by name using a regular expression,
38 :generic: this is a generic definition, matched by name using a regular expression,
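39 :experimental: when set, the item is part of an experimental feature.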
39 """
39 """
40
40
41 def __init__(self, section, name, default=None, alias=(),
41 def __init__(self, section, name, default=None, alias=(),
42 generic=False, priority=0):
42 generic=False, priority=0, experimental=False):
43 self.section = section
43 self.section = section
44 self.name = name
44 self.name = name
45 self.default = default
45 self.default = default
46 self.alias = list(alias)
46 self.alias = list(alias)
47 self.generic = generic
47 self.generic = generic
48 self.priority = priority
48 self.priority = priority
49 self.experimental = experimental
49 self._re = None
50 self._re = None
50 if generic:
51 if generic:
51 self._re = re.compile(self.name)
52 self._re = re.compile(self.name)
52
53
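The new keyword is the point of this change: a declaration can now carry the experimental marker directly. A minimal sketch (section and name are illustrative):

item = configitem('experimental', 'my-feature',
                  default=False,
                  experimental=True)
assert item.experimental
assert not configitem('ui', 'debug', default=False).experimental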
53 class itemregister(dict):
54 class itemregister(dict):
54 """A specialized dictionary that can handle wild-card selection"""
55 """A specialized dictionary that can handle wild-card selection"""
55
56
56 def __init__(self):
57 def __init__(self):
57 super(itemregister, self).__init__()
58 super(itemregister, self).__init__()
58 self._generics = set()
59 self._generics = set()
59
60
60 def update(self, other):
61 def update(self, other):
61 super(itemregister, self).update(other)
62 super(itemregister, self).update(other)
62 self._generics.update(other._generics)
63 self._generics.update(other._generics)
63
64
64 def __setitem__(self, key, item):
65 def __setitem__(self, key, item):
65 super(itemregister, self).__setitem__(key, item)
66 super(itemregister, self).__setitem__(key, item)
66 if item.generic:
67 if item.generic:
67 self._generics.add(item)
68 self._generics.add(item)
68
69
69 def get(self, key):
70 def get(self, key):
70 baseitem = super(itemregister, self).get(key)
71 baseitem = super(itemregister, self).get(key)
71 if baseitem is not None and not baseitem.generic:
72 if baseitem is not None and not baseitem.generic:
72 return baseitem
73 return baseitem
73
74
74 # search for a matching generic item
75 # search for a matching generic item
75 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
76 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
76 for item in generics:
77 for item in generics:
77 # we use 'match' instead of 'search' to make the matching simpler
78 # we use 'match' instead of 'search' to make the matching simpler
78 # for people unfamiliar with regular expressions. Having the match
79 # for people unfamiliar with regular expressions. Having the match
79 # rooted to the start of the string produces less surprising
80 # rooted to the start of the string produces less surprising
80 # results for users writing simple regexes for sub-attributes.
81 # results for users writing simple regexes for sub-attributes.
81 #
82 #
82 # For example, using "color\..*" with match produces an unsurprising
83 # For example, using "color\..*" with match produces an unsurprising
83 # result, while using search could suddenly match apparently
84 # result, while using search could suddenly match apparently
84 # unrelated configuration that happens to contain "color."
85 # unrelated configuration that happens to contain "color."
85 # anywhere. This is a tradeoff where we favor requiring ".*" on
86 # anywhere. This is a tradeoff where we favor requiring ".*" on
86 # some matches to avoid the need to prefix most patterns with "^".
87 # some matches to avoid the need to prefix most patterns with "^".
87 # The "^" seems more error-prone.
88 # The "^" seems more error-prone.
88 if item._re.match(key):
89 if item._re.match(key):
89 return item
90 return item
90
91
91 return None
92 return None
92
93
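To make the match-versus-search tradeoff above concrete, a small sketch (the keys are illustrative):

import re

pattern = re.compile(r'color\..*')
assert pattern.match('color.diff.inserted')        # anchored: hit
assert not pattern.match('pager.attend-color.x')   # anchored: miss
assert pattern.search('pager.attend-color.x')      # search would have hit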
93 coreitems = {}
94 coreitems = {}
94
95
95 def _register(configtable, *args, **kwargs):
96 def _register(configtable, *args, **kwargs):
96 item = configitem(*args, **kwargs)
97 item = configitem(*args, **kwargs)
97 section = configtable.setdefault(item.section, itemregister())
98 section = configtable.setdefault(item.section, itemregister())
98 if item.name in section:
99 if item.name in section:
99 msg = "duplicated config item registration for '%s.%s'"
100 msg = "duplicated config item registration for '%s.%s'"
100 raise error.ProgrammingError(msg % (item.section, item.name))
101 raise error.ProgrammingError(msg % (item.section, item.name))
101 section[item.name] = item
102 section[item.name] = item
102
103
103 # special value for the case where the default is derived from other values
104 # special value for the case where the default is derived from other values
104 dynamicdefault = object()
105 dynamicdefault = object()
105
106
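A sketch of how a consumer can distinguish the sentinel from real defaults (resolvedefault is hypothetical; it is not part of this module):

def resolvedefault(item, computed):
    # dynamicdefault is a unique object, so an identity test suffices
    if item.default is dynamicdefault:
        return computed                   # derived from other values
    if callable(item.default):
        return item.default()             # e.g. default=list, called fresh
    return item.default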
106 # Registering actual config items
107 # Registering actual config items
107
108
108 def getitemregister(configtable):
109 def getitemregister(configtable):
109 f = functools.partial(_register, configtable)
110 f = functools.partial(_register, configtable)
110 # export pseudo enum as configitem.*
111 # export pseudo enum as configitem.*
111 f.dynamicdefault = dynamicdefault
112 f.dynamicdefault = dynamicdefault
112 return f
113 return f
113
114
114 coreconfigitem = getitemregister(coreitems)
115 coreconfigitem = getitemregister(coreitems)
115
116
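_register also guards against double declarations; a sketch of the failure mode (section and item names are hypothetical):

table = {}
register = getitemregister(table)
register('mysection', 'myitem', default=None)
try:
    register('mysection', 'myitem', default=None)
except error.ProgrammingError:
    pass   # "duplicated config item registration for 'mysection.myitem'"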
116 def _registerdiffopts(section, configprefix=''):
117 def _registerdiffopts(section, configprefix=''):
117 coreconfigitem(section, configprefix + 'nodates',
118 coreconfigitem(section, configprefix + 'nodates',
118 default=False,
119 default=False,
119 )
120 )
120 coreconfigitem(section, configprefix + 'showfunc',
121 coreconfigitem(section, configprefix + 'showfunc',
121 default=False,
122 default=False,
122 )
123 )
123 coreconfigitem(section, configprefix + 'unified',
124 coreconfigitem(section, configprefix + 'unified',
124 default=None,
125 default=None,
125 )
126 )
126 coreconfigitem(section, configprefix + 'git',
127 coreconfigitem(section, configprefix + 'git',
127 default=False,
128 default=False,
128 )
129 )
129 coreconfigitem(section, configprefix + 'ignorews',
130 coreconfigitem(section, configprefix + 'ignorews',
130 default=False,
131 default=False,
131 )
132 )
132 coreconfigitem(section, configprefix + 'ignorewsamount',
133 coreconfigitem(section, configprefix + 'ignorewsamount',
133 default=False,
134 default=False,
134 )
135 )
135 coreconfigitem(section, configprefix + 'ignoreblanklines',
136 coreconfigitem(section, configprefix + 'ignoreblanklines',
136 default=False,
137 default=False,
137 )
138 )
138 coreconfigitem(section, configprefix + 'ignorewseol',
139 coreconfigitem(section, configprefix + 'ignorewseol',
139 default=False,
140 default=False,
140 )
141 )
141 coreconfigitem(section, configprefix + 'nobinary',
142 coreconfigitem(section, configprefix + 'nobinary',
142 default=False,
143 default=False,
143 )
144 )
144 coreconfigitem(section, configprefix + 'noprefix',
145 coreconfigitem(section, configprefix + 'noprefix',
145 default=False,
146 default=False,
146 )
147 )
147 coreconfigitem(section, configprefix + 'word-diff',
148 coreconfigitem(section, configprefix + 'word-diff',
148 default=False,
149 default=False,
149 )
150 )
150
151
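For reference, each call to the helper above fans out into the full family of diff flags; the key shapes it produces can be sketched as follows (diffoptkeys is a hypothetical illustration, not part of this module):

def diffoptkeys(section, configprefix=''):
    flags = ['nodates', 'showfunc', 'unified', 'git', 'ignorews',
             'ignorewsamount', 'ignoreblanklines', 'ignorewseol',
             'nobinary', 'noprefix', 'word-diff']
    return ['%s.%s%s' % (section, configprefix, f) for f in flags]

# diffoptkeys('commands', 'commit.interactive.')
# -> ['commands.commit.interactive.nodates', ...,
#     'commands.commit.interactive.word-diff']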
151 coreconfigitem('alias', '.*',
152 coreconfigitem('alias', '.*',
152 default=dynamicdefault,
153 default=dynamicdefault,
153 generic=True,
154 generic=True,
154 )
155 )
155 coreconfigitem('auth', 'cookiefile',
156 coreconfigitem('auth', 'cookiefile',
156 default=None,
157 default=None,
157 )
158 )
158 _registerdiffopts(section='annotate')
159 _registerdiffopts(section='annotate')
159 # bookmarks.pushing: internal hack for discovery
160 # bookmarks.pushing: internal hack for discovery
160 coreconfigitem('bookmarks', 'pushing',
161 coreconfigitem('bookmarks', 'pushing',
161 default=list,
162 default=list,
162 )
163 )
163 # bundle.mainreporoot: internal hack for bundlerepo
164 # bundle.mainreporoot: internal hack for bundlerepo
164 coreconfigitem('bundle', 'mainreporoot',
165 coreconfigitem('bundle', 'mainreporoot',
165 default='',
166 default='',
166 )
167 )
167 coreconfigitem('censor', 'policy',
168 coreconfigitem('censor', 'policy',
168 default='abort',
169 default='abort',
170 experimental=True,
169 )
171 )
170 coreconfigitem('chgserver', 'idletimeout',
172 coreconfigitem('chgserver', 'idletimeout',
171 default=3600,
173 default=3600,
172 )
174 )
173 coreconfigitem('chgserver', 'skiphash',
175 coreconfigitem('chgserver', 'skiphash',
174 default=False,
176 default=False,
175 )
177 )
176 coreconfigitem('cmdserver', 'log',
178 coreconfigitem('cmdserver', 'log',
177 default=None,
179 default=None,
178 )
180 )
179 coreconfigitem('cmdserver', 'max-log-files',
181 coreconfigitem('cmdserver', 'max-log-files',
180 default=7,
182 default=7,
181 )
183 )
182 coreconfigitem('cmdserver', 'max-log-size',
184 coreconfigitem('cmdserver', 'max-log-size',
183 default='1 MB',
185 default='1 MB',
184 )
186 )
185 coreconfigitem('cmdserver', 'max-repo-cache',
187 coreconfigitem('cmdserver', 'max-repo-cache',
186 default=0,
188 default=0,
189 experimental=True,
187 )
190 )
188 coreconfigitem('cmdserver', 'message-encodings',
191 coreconfigitem('cmdserver', 'message-encodings',
189 default=list,
192 default=list,
193 experimental=True,
190 )
194 )
191 coreconfigitem('cmdserver', 'track-log',
195 coreconfigitem('cmdserver', 'track-log',
192 default=lambda: ['chgserver', 'cmdserver', 'repocache'],
196 default=lambda: ['chgserver', 'cmdserver', 'repocache'],
193 )
197 )
194 coreconfigitem('color', '.*',
198 coreconfigitem('color', '.*',
195 default=None,
199 default=None,
196 generic=True,
200 generic=True,
197 )
201 )
198 coreconfigitem('color', 'mode',
202 coreconfigitem('color', 'mode',
199 default='auto',
203 default='auto',
200 )
204 )
201 coreconfigitem('color', 'pagermode',
205 coreconfigitem('color', 'pagermode',
202 default=dynamicdefault,
206 default=dynamicdefault,
203 )
207 )
204 _registerdiffopts(section='commands', configprefix='commit.interactive.')
208 _registerdiffopts(section='commands', configprefix='commit.interactive.')
205 coreconfigitem('commands', 'commit.post-status',
209 coreconfigitem('commands', 'commit.post-status',
206 default=False,
210 default=False,
207 )
211 )
208 coreconfigitem('commands', 'grep.all-files',
212 coreconfigitem('commands', 'grep.all-files',
209 default=False,
213 default=False,
214 experimental=True,
210 )
215 )
211 coreconfigitem('commands', 'resolve.confirm',
216 coreconfigitem('commands', 'resolve.confirm',
212 default=False,
217 default=False,
213 )
218 )
214 coreconfigitem('commands', 'resolve.explicit-re-merge',
219 coreconfigitem('commands', 'resolve.explicit-re-merge',
215 default=False,
220 default=False,
216 )
221 )
217 coreconfigitem('commands', 'resolve.mark-check',
222 coreconfigitem('commands', 'resolve.mark-check',
218 default='none',
223 default='none',
219 )
224 )
220 _registerdiffopts(section='commands', configprefix='revert.interactive.')
225 _registerdiffopts(section='commands', configprefix='revert.interactive.')
221 coreconfigitem('commands', 'show.aliasprefix',
226 coreconfigitem('commands', 'show.aliasprefix',
222 default=list,
227 default=list,
223 )
228 )
224 coreconfigitem('commands', 'status.relative',
229 coreconfigitem('commands', 'status.relative',
225 default=False,
230 default=False,
226 )
231 )
227 coreconfigitem('commands', 'status.skipstates',
232 coreconfigitem('commands', 'status.skipstates',
228 default=[],
233 default=[],
234 experimental=True,
229 )
235 )
230 coreconfigitem('commands', 'status.terse',
236 coreconfigitem('commands', 'status.terse',
231 default='',
237 default='',
232 )
238 )
233 coreconfigitem('commands', 'status.verbose',
239 coreconfigitem('commands', 'status.verbose',
234 default=False,
240 default=False,
235 )
241 )
236 coreconfigitem('commands', 'update.check',
242 coreconfigitem('commands', 'update.check',
237 default=None,
243 default=None,
238 )
244 )
239 coreconfigitem('commands', 'update.requiredest',
245 coreconfigitem('commands', 'update.requiredest',
240 default=False,
246 default=False,
241 )
247 )
242 coreconfigitem('committemplate', '.*',
248 coreconfigitem('committemplate', '.*',
243 default=None,
249 default=None,
244 generic=True,
250 generic=True,
245 )
251 )
246 coreconfigitem('convert', 'bzr.saverev',
252 coreconfigitem('convert', 'bzr.saverev',
247 default=True,
253 default=True,
248 )
254 )
249 coreconfigitem('convert', 'cvsps.cache',
255 coreconfigitem('convert', 'cvsps.cache',
250 default=True,
256 default=True,
251 )
257 )
252 coreconfigitem('convert', 'cvsps.fuzz',
258 coreconfigitem('convert', 'cvsps.fuzz',
253 default=60,
259 default=60,
254 )
260 )
255 coreconfigitem('convert', 'cvsps.logencoding',
261 coreconfigitem('convert', 'cvsps.logencoding',
256 default=None,
262 default=None,
257 )
263 )
258 coreconfigitem('convert', 'cvsps.mergefrom',
264 coreconfigitem('convert', 'cvsps.mergefrom',
259 default=None,
265 default=None,
260 )
266 )
261 coreconfigitem('convert', 'cvsps.mergeto',
267 coreconfigitem('convert', 'cvsps.mergeto',
262 default=None,
268 default=None,
263 )
269 )
264 coreconfigitem('convert', 'git.committeractions',
270 coreconfigitem('convert', 'git.committeractions',
265 default=lambda: ['messagedifferent'],
271 default=lambda: ['messagedifferent'],
266 )
272 )
267 coreconfigitem('convert', 'git.extrakeys',
273 coreconfigitem('convert', 'git.extrakeys',
268 default=list,
274 default=list,
269 )
275 )
270 coreconfigitem('convert', 'git.findcopiesharder',
276 coreconfigitem('convert', 'git.findcopiesharder',
271 default=False,
277 default=False,
272 )
278 )
273 coreconfigitem('convert', 'git.remoteprefix',
279 coreconfigitem('convert', 'git.remoteprefix',
274 default='remote',
280 default='remote',
275 )
281 )
276 coreconfigitem('convert', 'git.renamelimit',
282 coreconfigitem('convert', 'git.renamelimit',
277 default=400,
283 default=400,
278 )
284 )
279 coreconfigitem('convert', 'git.saverev',
285 coreconfigitem('convert', 'git.saverev',
280 default=True,
286 default=True,
281 )
287 )
282 coreconfigitem('convert', 'git.similarity',
288 coreconfigitem('convert', 'git.similarity',
283 default=50,
289 default=50,
284 )
290 )
285 coreconfigitem('convert', 'git.skipsubmodules',
291 coreconfigitem('convert', 'git.skipsubmodules',
286 default=False,
292 default=False,
287 )
293 )
288 coreconfigitem('convert', 'hg.clonebranches',
294 coreconfigitem('convert', 'hg.clonebranches',
289 default=False,
295 default=False,
290 )
296 )
291 coreconfigitem('convert', 'hg.ignoreerrors',
297 coreconfigitem('convert', 'hg.ignoreerrors',
292 default=False,
298 default=False,
293 )
299 )
294 coreconfigitem('convert', 'hg.preserve-hash',
300 coreconfigitem('convert', 'hg.preserve-hash',
295 default=False,
301 default=False,
296 )
302 )
297 coreconfigitem('convert', 'hg.revs',
303 coreconfigitem('convert', 'hg.revs',
298 default=None,
304 default=None,
299 )
305 )
300 coreconfigitem('convert', 'hg.saverev',
306 coreconfigitem('convert', 'hg.saverev',
301 default=False,
307 default=False,
302 )
308 )
303 coreconfigitem('convert', 'hg.sourcename',
309 coreconfigitem('convert', 'hg.sourcename',
304 default=None,
310 default=None,
305 )
311 )
306 coreconfigitem('convert', 'hg.startrev',
312 coreconfigitem('convert', 'hg.startrev',
307 default=None,
313 default=None,
308 )
314 )
309 coreconfigitem('convert', 'hg.tagsbranch',
315 coreconfigitem('convert', 'hg.tagsbranch',
310 default='default',
316 default='default',
311 )
317 )
312 coreconfigitem('convert', 'hg.usebranchnames',
318 coreconfigitem('convert', 'hg.usebranchnames',
313 default=True,
319 default=True,
314 )
320 )
315 coreconfigitem('convert', 'ignoreancestorcheck',
321 coreconfigitem('convert', 'ignoreancestorcheck',
316 default=False,
322 default=False,
323 experimental=True,
317 )
324 )
318 coreconfigitem('convert', 'localtimezone',
325 coreconfigitem('convert', 'localtimezone',
319 default=False,
326 default=False,
320 )
327 )
321 coreconfigitem('convert', 'p4.encoding',
328 coreconfigitem('convert', 'p4.encoding',
322 default=dynamicdefault,
329 default=dynamicdefault,
323 )
330 )
324 coreconfigitem('convert', 'p4.startrev',
331 coreconfigitem('convert', 'p4.startrev',
325 default=0,
332 default=0,
326 )
333 )
327 coreconfigitem('convert', 'skiptags',
334 coreconfigitem('convert', 'skiptags',
328 default=False,
335 default=False,
329 )
336 )
330 coreconfigitem('convert', 'svn.debugsvnlog',
337 coreconfigitem('convert', 'svn.debugsvnlog',
331 default=True,
338 default=True,
332 )
339 )
333 coreconfigitem('convert', 'svn.trunk',
340 coreconfigitem('convert', 'svn.trunk',
334 default=None,
341 default=None,
335 )
342 )
336 coreconfigitem('convert', 'svn.tags',
343 coreconfigitem('convert', 'svn.tags',
337 default=None,
344 default=None,
338 )
345 )
339 coreconfigitem('convert', 'svn.branches',
346 coreconfigitem('convert', 'svn.branches',
340 default=None,
347 default=None,
341 )
348 )
342 coreconfigitem('convert', 'svn.startrev',
349 coreconfigitem('convert', 'svn.startrev',
343 default=0,
350 default=0,
344 )
351 )
345 coreconfigitem('debug', 'dirstate.delaywrite',
352 coreconfigitem('debug', 'dirstate.delaywrite',
346 default=0,
353 default=0,
347 )
354 )
348 coreconfigitem('defaults', '.*',
355 coreconfigitem('defaults', '.*',
349 default=None,
356 default=None,
350 generic=True,
357 generic=True,
351 )
358 )
352 coreconfigitem('devel', 'all-warnings',
359 coreconfigitem('devel', 'all-warnings',
353 default=False,
360 default=False,
354 )
361 )
355 coreconfigitem('devel', 'bundle2.debug',
362 coreconfigitem('devel', 'bundle2.debug',
356 default=False,
363 default=False,
357 )
364 )
358 coreconfigitem('devel', 'bundle.delta',
365 coreconfigitem('devel', 'bundle.delta',
359 default='',
366 default='',
360 )
367 )
361 coreconfigitem('devel', 'cache-vfs',
368 coreconfigitem('devel', 'cache-vfs',
362 default=None,
369 default=None,
363 )
370 )
364 coreconfigitem('devel', 'check-locks',
371 coreconfigitem('devel', 'check-locks',
365 default=False,
372 default=False,
366 )
373 )
367 coreconfigitem('devel', 'check-relroot',
374 coreconfigitem('devel', 'check-relroot',
368 default=False,
375 default=False,
369 )
376 )
370 coreconfigitem('devel', 'default-date',
377 coreconfigitem('devel', 'default-date',
371 default=None,
378 default=None,
372 )
379 )
373 coreconfigitem('devel', 'deprec-warn',
380 coreconfigitem('devel', 'deprec-warn',
374 default=False,
381 default=False,
375 )
382 )
376 coreconfigitem('devel', 'disableloaddefaultcerts',
383 coreconfigitem('devel', 'disableloaddefaultcerts',
377 default=False,
384 default=False,
378 )
385 )
379 coreconfigitem('devel', 'warn-empty-changegroup',
386 coreconfigitem('devel', 'warn-empty-changegroup',
380 default=False,
387 default=False,
381 )
388 )
382 coreconfigitem('devel', 'legacy.exchange',
389 coreconfigitem('devel', 'legacy.exchange',
383 default=list,
390 default=list,
384 )
391 )
385 coreconfigitem('devel', 'servercafile',
392 coreconfigitem('devel', 'servercafile',
386 default='',
393 default='',
387 )
394 )
388 coreconfigitem('devel', 'serverexactprotocol',
395 coreconfigitem('devel', 'serverexactprotocol',
389 default='',
396 default='',
390 )
397 )
391 coreconfigitem('devel', 'serverrequirecert',
398 coreconfigitem('devel', 'serverrequirecert',
392 default=False,
399 default=False,
393 )
400 )
394 coreconfigitem('devel', 'strip-obsmarkers',
401 coreconfigitem('devel', 'strip-obsmarkers',
395 default=True,
402 default=True,
396 )
403 )
397 coreconfigitem('devel', 'warn-config',
404 coreconfigitem('devel', 'warn-config',
398 default=None,
405 default=None,
399 )
406 )
400 coreconfigitem('devel', 'warn-config-default',
407 coreconfigitem('devel', 'warn-config-default',
401 default=None,
408 default=None,
402 )
409 )
403 coreconfigitem('devel', 'user.obsmarker',
410 coreconfigitem('devel', 'user.obsmarker',
404 default=None,
411 default=None,
405 )
412 )
406 coreconfigitem('devel', 'warn-config-unknown',
413 coreconfigitem('devel', 'warn-config-unknown',
407 default=None,
414 default=None,
408 )
415 )
409 coreconfigitem('devel', 'debug.copies',
416 coreconfigitem('devel', 'debug.copies',
410 default=False,
417 default=False,
411 )
418 )
412 coreconfigitem('devel', 'debug.extensions',
419 coreconfigitem('devel', 'debug.extensions',
413 default=False,
420 default=False,
414 )
421 )
415 coreconfigitem('devel', 'debug.peer-request',
422 coreconfigitem('devel', 'debug.peer-request',
416 default=False,
423 default=False,
417 )
424 )
418 coreconfigitem('devel', 'discovery.randomize',
425 coreconfigitem('devel', 'discovery.randomize',
419 default=True,
426 default=True,
420 )
427 )
421 _registerdiffopts(section='diff')
428 _registerdiffopts(section='diff')
422 coreconfigitem('email', 'bcc',
429 coreconfigitem('email', 'bcc',
423 default=None,
430 default=None,
424 )
431 )
425 coreconfigitem('email', 'cc',
432 coreconfigitem('email', 'cc',
426 default=None,
433 default=None,
427 )
434 )
428 coreconfigitem('email', 'charsets',
435 coreconfigitem('email', 'charsets',
429 default=list,
436 default=list,
430 )
437 )
431 coreconfigitem('email', 'from',
438 coreconfigitem('email', 'from',
432 default=None,
439 default=None,
433 )
440 )
434 coreconfigitem('email', 'method',
441 coreconfigitem('email', 'method',
435 default='smtp',
442 default='smtp',
436 )
443 )
437 coreconfigitem('email', 'reply-to',
444 coreconfigitem('email', 'reply-to',
438 default=None,
445 default=None,
439 )
446 )
440 coreconfigitem('email', 'to',
447 coreconfigitem('email', 'to',
441 default=None,
448 default=None,
442 )
449 )
443 coreconfigitem('experimental', 'archivemetatemplate',
450 coreconfigitem('experimental', 'archivemetatemplate',
444 default=dynamicdefault,
451 default=dynamicdefault,
445 )
452 )
446 coreconfigitem('experimental', 'auto-publish',
453 coreconfigitem('experimental', 'auto-publish',
447 default='publish',
454 default='publish',
448 )
455 )
449 coreconfigitem('experimental', 'bundle-phases',
456 coreconfigitem('experimental', 'bundle-phases',
450 default=False,
457 default=False,
451 )
458 )
452 coreconfigitem('experimental', 'bundle2-advertise',
459 coreconfigitem('experimental', 'bundle2-advertise',
453 default=True,
460 default=True,
454 )
461 )
455 coreconfigitem('experimental', 'bundle2-output-capture',
462 coreconfigitem('experimental', 'bundle2-output-capture',
456 default=False,
463 default=False,
457 )
464 )
458 coreconfigitem('experimental', 'bundle2.pushback',
465 coreconfigitem('experimental', 'bundle2.pushback',
459 default=False,
466 default=False,
460 )
467 )
461 coreconfigitem('experimental', 'bundle2lazylocking',
468 coreconfigitem('experimental', 'bundle2lazylocking',
462 default=False,
469 default=False,
463 )
470 )
464 coreconfigitem('experimental', 'bundlecomplevel',
471 coreconfigitem('experimental', 'bundlecomplevel',
465 default=None,
472 default=None,
466 )
473 )
467 coreconfigitem('experimental', 'bundlecomplevel.bzip2',
474 coreconfigitem('experimental', 'bundlecomplevel.bzip2',
468 default=None,
475 default=None,
469 )
476 )
470 coreconfigitem('experimental', 'bundlecomplevel.gzip',
477 coreconfigitem('experimental', 'bundlecomplevel.gzip',
471 default=None,
478 default=None,
472 )
479 )
473 coreconfigitem('experimental', 'bundlecomplevel.none',
480 coreconfigitem('experimental', 'bundlecomplevel.none',
474 default=None,
481 default=None,
475 )
482 )
476 coreconfigitem('experimental', 'bundlecomplevel.zstd',
483 coreconfigitem('experimental', 'bundlecomplevel.zstd',
477 default=None,
484 default=None,
478 )
485 )
479 coreconfigitem('experimental', 'changegroup3',
486 coreconfigitem('experimental', 'changegroup3',
480 default=False,
487 default=False,
481 )
488 )
482 coreconfigitem('experimental', 'cleanup-as-archived',
489 coreconfigitem('experimental', 'cleanup-as-archived',
483 default=False,
490 default=False,
484 )
491 )
485 coreconfigitem('experimental', 'clientcompressionengines',
492 coreconfigitem('experimental', 'clientcompressionengines',
486 default=list,
493 default=list,
487 )
494 )
488 coreconfigitem('experimental', 'copytrace',
495 coreconfigitem('experimental', 'copytrace',
489 default='on',
496 default='on',
490 )
497 )
491 coreconfigitem('experimental', 'copytrace.movecandidateslimit',
498 coreconfigitem('experimental', 'copytrace.movecandidateslimit',
492 default=100,
499 default=100,
493 )
500 )
494 coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
501 coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
495 default=100,
502 default=100,
496 )
503 )
497 coreconfigitem('experimental', 'copies.read-from',
504 coreconfigitem('experimental', 'copies.read-from',
498 default="filelog-only",
505 default="filelog-only",
499 )
506 )
500 coreconfigitem('experimental', 'copies.write-to',
507 coreconfigitem('experimental', 'copies.write-to',
501 default='filelog-only',
508 default='filelog-only',
502 )
509 )
503 coreconfigitem('experimental', 'crecordtest',
510 coreconfigitem('experimental', 'crecordtest',
504 default=None,
511 default=None,
505 )
512 )
506 coreconfigitem('experimental', 'directaccess',
513 coreconfigitem('experimental', 'directaccess',
507 default=False,
514 default=False,
508 )
515 )
509 coreconfigitem('experimental', 'directaccess.revnums',
516 coreconfigitem('experimental', 'directaccess.revnums',
510 default=False,
517 default=False,
511 )
518 )
512 coreconfigitem('experimental', 'editortmpinhg',
519 coreconfigitem('experimental', 'editortmpinhg',
513 default=False,
520 default=False,
514 )
521 )
515 coreconfigitem('experimental', 'evolution',
522 coreconfigitem('experimental', 'evolution',
516 default=list,
523 default=list,
517 )
524 )
518 coreconfigitem('experimental', 'evolution.allowdivergence',
525 coreconfigitem('experimental', 'evolution.allowdivergence',
519 default=False,
526 default=False,
520 alias=[('experimental', 'allowdivergence')]
527 alias=[('experimental', 'allowdivergence')]
521 )
528 )
522 coreconfigitem('experimental', 'evolution.allowunstable',
529 coreconfigitem('experimental', 'evolution.allowunstable',
523 default=None,
530 default=None,
524 )
531 )
525 coreconfigitem('experimental', 'evolution.createmarkers',
532 coreconfigitem('experimental', 'evolution.createmarkers',
526 default=None,
533 default=None,
527 )
534 )
528 coreconfigitem('experimental', 'evolution.effect-flags',
535 coreconfigitem('experimental', 'evolution.effect-flags',
529 default=True,
536 default=True,
530 alias=[('experimental', 'effect-flags')]
537 alias=[('experimental', 'effect-flags')]
531 )
538 )
532 coreconfigitem('experimental', 'evolution.exchange',
539 coreconfigitem('experimental', 'evolution.exchange',
533 default=None,
540 default=None,
534 )
541 )
535 coreconfigitem('experimental', 'evolution.bundle-obsmarker',
542 coreconfigitem('experimental', 'evolution.bundle-obsmarker',
536 default=False,
543 default=False,
537 )
544 )
538 coreconfigitem('experimental', 'log.topo',
545 coreconfigitem('experimental', 'log.topo',
539 default=False,
546 default=False,
540 )
547 )
541 coreconfigitem('experimental', 'evolution.report-instabilities',
548 coreconfigitem('experimental', 'evolution.report-instabilities',
542 default=True,
549 default=True,
543 )
550 )
544 coreconfigitem('experimental', 'evolution.track-operation',
551 coreconfigitem('experimental', 'evolution.track-operation',
545 default=True,
552 default=True,
546 )
553 )
547 # repo-level config to exclude a revset visibility
554 # repo-level config to exclude a revset visibility
548 #
555 #
549 # The target use case is to use `share` to expose different subset of the same
556 # The target use case is to use `share` to expose different subset of the same
550 # repository, especially server side. See also `server.view`.
557 # repository, especially server side. See also `server.view`.
551 coreconfigitem('experimental', 'extra-filter-revs',
558 coreconfigitem('experimental', 'extra-filter-revs',
552 default=None,
559 default=None,
553 )
560 )
554 coreconfigitem('experimental', 'maxdeltachainspan',
561 coreconfigitem('experimental', 'maxdeltachainspan',
555 default=-1,
562 default=-1,
556 )
563 )
557 coreconfigitem('experimental', 'mergetempdirprefix',
564 coreconfigitem('experimental', 'mergetempdirprefix',
558 default=None,
565 default=None,
559 )
566 )
560 coreconfigitem('experimental', 'mmapindexthreshold',
567 coreconfigitem('experimental', 'mmapindexthreshold',
561 default=None,
568 default=None,
562 )
569 )
563 coreconfigitem('experimental', 'narrow',
570 coreconfigitem('experimental', 'narrow',
564 default=False,
571 default=False,
565 )
572 )
566 coreconfigitem('experimental', 'nonnormalparanoidcheck',
573 coreconfigitem('experimental', 'nonnormalparanoidcheck',
567 default=False,
574 default=False,
568 )
575 )
569 coreconfigitem('experimental', 'exportableenviron',
576 coreconfigitem('experimental', 'exportableenviron',
570 default=list,
577 default=list,
571 )
578 )
572 coreconfigitem('experimental', 'extendedheader.index',
579 coreconfigitem('experimental', 'extendedheader.index',
573 default=None,
580 default=None,
574 )
581 )
575 coreconfigitem('experimental', 'extendedheader.similarity',
582 coreconfigitem('experimental', 'extendedheader.similarity',
576 default=False,
583 default=False,
577 )
584 )
578 coreconfigitem('experimental', 'graphshorten',
585 coreconfigitem('experimental', 'graphshorten',
579 default=False,
586 default=False,
580 )
587 )
581 coreconfigitem('experimental', 'graphstyle.parent',
588 coreconfigitem('experimental', 'graphstyle.parent',
582 default=dynamicdefault,
589 default=dynamicdefault,
583 )
590 )
584 coreconfigitem('experimental', 'graphstyle.missing',
591 coreconfigitem('experimental', 'graphstyle.missing',
585 default=dynamicdefault,
592 default=dynamicdefault,
586 )
593 )
587 coreconfigitem('experimental', 'graphstyle.grandparent',
594 coreconfigitem('experimental', 'graphstyle.grandparent',
588 default=dynamicdefault,
595 default=dynamicdefault,
589 )
596 )
590 coreconfigitem('experimental', 'hook-track-tags',
597 coreconfigitem('experimental', 'hook-track-tags',
591 default=False,
598 default=False,
592 )
599 )
593 coreconfigitem('experimental', 'httppeer.advertise-v2',
600 coreconfigitem('experimental', 'httppeer.advertise-v2',
594 default=False,
601 default=False,
595 )
602 )
596 coreconfigitem('experimental', 'httppeer.v2-encoder-order',
603 coreconfigitem('experimental', 'httppeer.v2-encoder-order',
597 default=None,
604 default=None,
598 )
605 )
599 coreconfigitem('experimental', 'httppostargs',
606 coreconfigitem('experimental', 'httppostargs',
600 default=False,
607 default=False,
601 )
608 )
602 coreconfigitem('experimental', 'mergedriver',
609 coreconfigitem('experimental', 'mergedriver',
603 default=None,
610 default=None,
604 )
611 )
605 coreconfigitem('experimental', 'nointerrupt', default=False)
612 coreconfigitem('experimental', 'nointerrupt', default=False)
606 coreconfigitem('experimental', 'nointerrupt-interactiveonly', default=True)
613 coreconfigitem('experimental', 'nointerrupt-interactiveonly', default=True)
607
614
608 coreconfigitem('experimental', 'obsmarkers-exchange-debug',
615 coreconfigitem('experimental', 'obsmarkers-exchange-debug',
609 default=False,
616 default=False,
610 )
617 )
611 coreconfigitem('experimental', 'remotenames',
618 coreconfigitem('experimental', 'remotenames',
612 default=False,
619 default=False,
613 )
620 )
614 coreconfigitem('experimental', 'removeemptydirs',
621 coreconfigitem('experimental', 'removeemptydirs',
615 default=True,
622 default=True,
616 )
623 )
617 coreconfigitem('experimental', 'revert.interactive.select-to-keep',
624 coreconfigitem('experimental', 'revert.interactive.select-to-keep',
618 default=False,
625 default=False,
619 )
626 )
620 coreconfigitem('experimental', 'revisions.prefixhexnode',
627 coreconfigitem('experimental', 'revisions.prefixhexnode',
621 default=False,
628 default=False,
622 )
629 )
623 coreconfigitem('experimental', 'revlogv2',
630 coreconfigitem('experimental', 'revlogv2',
624 default=None,
631 default=None,
625 )
632 )
626 coreconfigitem('experimental', 'revisions.disambiguatewithin',
633 coreconfigitem('experimental', 'revisions.disambiguatewithin',
627 default=None,
634 default=None,
628 )
635 )
629 coreconfigitem('experimental', 'server.filesdata.recommended-batch-size',
636 coreconfigitem('experimental', 'server.filesdata.recommended-batch-size',
630 default=50000,
637 default=50000,
631 )
638 )
632 coreconfigitem('experimental', 'server.manifestdata.recommended-batch-size',
639 coreconfigitem('experimental', 'server.manifestdata.recommended-batch-size',
633 default=100000,
640 default=100000,
634 )
641 )
635 coreconfigitem('experimental', 'server.stream-narrow-clones',
642 coreconfigitem('experimental', 'server.stream-narrow-clones',
636 default=False,
643 default=False,
637 )
644 )
638 coreconfigitem('experimental', 'single-head-per-branch',
645 coreconfigitem('experimental', 'single-head-per-branch',
639 default=False,
646 default=False,
640 )
647 )
641 coreconfigitem('experimental', 'sshserver.support-v2',
648 coreconfigitem('experimental', 'sshserver.support-v2',
642 default=False,
649 default=False,
643 )
650 )
644 coreconfigitem('experimental', 'sparse-read',
651 coreconfigitem('experimental', 'sparse-read',
645 default=False,
652 default=False,
646 )
653 )
647 coreconfigitem('experimental', 'sparse-read.density-threshold',
654 coreconfigitem('experimental', 'sparse-read.density-threshold',
648 default=0.50,
655 default=0.50,
649 )
656 )
650 coreconfigitem('experimental', 'sparse-read.min-gap-size',
657 coreconfigitem('experimental', 'sparse-read.min-gap-size',
651 default='65K',
658 default='65K',
652 )
659 )
653 coreconfigitem('experimental', 'treemanifest',
660 coreconfigitem('experimental', 'treemanifest',
654 default=False,
661 default=False,
655 )
662 )
656 coreconfigitem('experimental', 'update.atomic-file',
663 coreconfigitem('experimental', 'update.atomic-file',
657 default=False,
664 default=False,
658 )
665 )
659 coreconfigitem('experimental', 'sshpeer.advertise-v2',
666 coreconfigitem('experimental', 'sshpeer.advertise-v2',
660 default=False,
667 default=False,
661 )
668 )
662 coreconfigitem('experimental', 'web.apiserver',
669 coreconfigitem('experimental', 'web.apiserver',
663 default=False,
670 default=False,
664 )
671 )
665 coreconfigitem('experimental', 'web.api.http-v2',
672 coreconfigitem('experimental', 'web.api.http-v2',
666 default=False,
673 default=False,
667 )
674 )
668 coreconfigitem('experimental', 'web.api.debugreflect',
675 coreconfigitem('experimental', 'web.api.debugreflect',
669 default=False,
676 default=False,
670 )
677 )
671 coreconfigitem('experimental', 'worker.wdir-get-thread-safe',
678 coreconfigitem('experimental', 'worker.wdir-get-thread-safe',
672 default=False,
679 default=False,
673 )
680 )
674 coreconfigitem('experimental', 'xdiff',
681 coreconfigitem('experimental', 'xdiff',
675 default=False,
682 default=False,
676 )
683 )
677 coreconfigitem('extensions', '.*',
684 coreconfigitem('extensions', '.*',
678 default=None,
685 default=None,
679 generic=True,
686 generic=True,
680 )
687 )
681 coreconfigitem('extdata', '.*',
688 coreconfigitem('extdata', '.*',
682 default=None,
689 default=None,
683 generic=True,
690 generic=True,
684 )
691 )
685 coreconfigitem('format', 'bookmarks-in-store',
692 coreconfigitem('format', 'bookmarks-in-store',
686 default=False,
693 default=False,
687 )
694 )
688 coreconfigitem('format', 'chunkcachesize',
695 coreconfigitem('format', 'chunkcachesize',
689 default=None,
696 default=None,
697 experimental=True,
690 )
698 )
691 coreconfigitem('format', 'dotencode',
699 coreconfigitem('format', 'dotencode',
692 default=True,
700 default=True,
693 )
701 )
694 coreconfigitem('format', 'generaldelta',
702 coreconfigitem('format', 'generaldelta',
695 default=False,
703 default=False,
704 experimental=True,
696 )
705 )
697 coreconfigitem('format', 'manifestcachesize',
706 coreconfigitem('format', 'manifestcachesize',
698 default=None,
707 default=None,
708 experimental=True,
699 )
709 )
700 coreconfigitem('format', 'maxchainlen',
710 coreconfigitem('format', 'maxchainlen',
701 default=dynamicdefault,
711 default=dynamicdefault,
712 experimental=True,
702 )
713 )
703 coreconfigitem('format', 'obsstore-version',
714 coreconfigitem('format', 'obsstore-version',
704 default=None,
715 default=None,
705 )
716 )
706 coreconfigitem('format', 'sparse-revlog',
717 coreconfigitem('format', 'sparse-revlog',
707 default=True,
718 default=True,
708 )
719 )
709 coreconfigitem('format', 'revlog-compression',
720 coreconfigitem('format', 'revlog-compression',
710 default='zlib',
721 default='zlib',
711 alias=[('experimental', 'format.compression')]
722 alias=[('experimental', 'format.compression')]
712 )
723 )
713 coreconfigitem('format', 'usefncache',
724 coreconfigitem('format', 'usefncache',
714 default=True,
725 default=True,
715 )
726 )
716 coreconfigitem('format', 'usegeneraldelta',
727 coreconfigitem('format', 'usegeneraldelta',
717 default=True,
728 default=True,
718 )
729 )
719 coreconfigitem('format', 'usestore',
730 coreconfigitem('format', 'usestore',
720 default=True,
731 default=True,
721 )
732 )
722 coreconfigitem('format', 'internal-phase',
733 coreconfigitem('format', 'internal-phase',
723 default=False,
734 default=False,
735 experimental=True,
724 )
736 )
725 coreconfigitem('fsmonitor', 'warn_when_unused',
737 coreconfigitem('fsmonitor', 'warn_when_unused',
726 default=True,
738 default=True,
727 )
739 )
728 coreconfigitem('fsmonitor', 'warn_update_file_count',
740 coreconfigitem('fsmonitor', 'warn_update_file_count',
729 default=50000,
741 default=50000,
730 )
742 )
731 coreconfigitem('help', br'hidden-command\..*',
743 coreconfigitem('help', br'hidden-command\..*',
732 default=False,
744 default=False,
733 generic=True,
745 generic=True,
734 )
746 )
735 coreconfigitem('help', br'hidden-topic\..*',
747 coreconfigitem('help', br'hidden-topic\..*',
736 default=False,
748 default=False,
737 generic=True,
749 generic=True,
738 )
750 )
739 coreconfigitem('hooks', '.*',
751 coreconfigitem('hooks', '.*',
740 default=dynamicdefault,
752 default=dynamicdefault,
741 generic=True,
753 generic=True,
742 )
754 )
743 coreconfigitem('hgweb-paths', '.*',
755 coreconfigitem('hgweb-paths', '.*',
744 default=list,
756 default=list,
745 generic=True,
757 generic=True,
746 )
758 )
747 coreconfigitem('hostfingerprints', '.*',
759 coreconfigitem('hostfingerprints', '.*',
748 default=list,
760 default=list,
749 generic=True,
761 generic=True,
750 )
762 )
751 coreconfigitem('hostsecurity', 'ciphers',
763 coreconfigitem('hostsecurity', 'ciphers',
752 default=None,
764 default=None,
753 )
765 )
754 coreconfigitem('hostsecurity', 'disabletls10warning',
766 coreconfigitem('hostsecurity', 'disabletls10warning',
755 default=False,
767 default=False,
756 )
768 )
757 coreconfigitem('hostsecurity', 'minimumprotocol',
769 coreconfigitem('hostsecurity', 'minimumprotocol',
758 default=dynamicdefault,
770 default=dynamicdefault,
759 )
771 )
760 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
772 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
761 default=dynamicdefault,
773 default=dynamicdefault,
762 generic=True,
774 generic=True,
763 )
775 )
764 coreconfigitem('hostsecurity', '.*:ciphers$',
776 coreconfigitem('hostsecurity', '.*:ciphers$',
765 default=dynamicdefault,
777 default=dynamicdefault,
766 generic=True,
778 generic=True,
767 )
779 )
768 coreconfigitem('hostsecurity', '.*:fingerprints$',
780 coreconfigitem('hostsecurity', '.*:fingerprints$',
769 default=list,
781 default=list,
770 generic=True,
782 generic=True,
771 )
783 )
772 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
784 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
773 default=None,
785 default=None,
774 generic=True,
786 generic=True,
775 )
787 )
776
788
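The $-anchored hostsecurity patterns above are what let per-host keys resolve; a sketch (host name illustrative):

import re

# A key such as 'example.com:minimumprotocol' is never registered
# literally; itemregister.get falls back to the generic item whose
# pattern '.*:minimumprotocol$' matches it.
assert re.compile(r'.*:minimumprotocol$').match('example.com:minimumprotocol')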
777 coreconfigitem('http_proxy', 'always',
789 coreconfigitem('http_proxy', 'always',
778 default=False,
790 default=False,
779 )
791 )
780 coreconfigitem('http_proxy', 'host',
792 coreconfigitem('http_proxy', 'host',
781 default=None,
793 default=None,
782 )
794 )
783 coreconfigitem('http_proxy', 'no',
795 coreconfigitem('http_proxy', 'no',
784 default=list,
796 default=list,
785 )
797 )
786 coreconfigitem('http_proxy', 'passwd',
798 coreconfigitem('http_proxy', 'passwd',
787 default=None,
799 default=None,
788 )
800 )
789 coreconfigitem('http_proxy', 'user',
801 coreconfigitem('http_proxy', 'user',
790 default=None,
802 default=None,
791 )
803 )
792
804
793 coreconfigitem('http', 'timeout',
805 coreconfigitem('http', 'timeout',
794 default=None,
806 default=None,
795 )
807 )
796
808
797 coreconfigitem('logtoprocess', 'commandexception',
809 coreconfigitem('logtoprocess', 'commandexception',
798 default=None,
810 default=None,
799 )
811 )
800 coreconfigitem('logtoprocess', 'commandfinish',
812 coreconfigitem('logtoprocess', 'commandfinish',
801 default=None,
813 default=None,
802 )
814 )
803 coreconfigitem('logtoprocess', 'command',
815 coreconfigitem('logtoprocess', 'command',
804 default=None,
816 default=None,
805 )
817 )
806 coreconfigitem('logtoprocess', 'develwarn',
818 coreconfigitem('logtoprocess', 'develwarn',
807 default=None,
819 default=None,
808 )
820 )
809 coreconfigitem('logtoprocess', 'uiblocked',
821 coreconfigitem('logtoprocess', 'uiblocked',
810 default=None,
822 default=None,
811 )
823 )
812 coreconfigitem('merge', 'checkunknown',
824 coreconfigitem('merge', 'checkunknown',
813 default='abort',
825 default='abort',
814 )
826 )
815 coreconfigitem('merge', 'checkignored',
827 coreconfigitem('merge', 'checkignored',
816 default='abort',
828 default='abort',
817 )
829 )
818 coreconfigitem('experimental', 'merge.checkpathconflicts',
830 coreconfigitem('experimental', 'merge.checkpathconflicts',
819 default=False,
831 default=False,
820 )
832 )
821 coreconfigitem('merge', 'followcopies',
833 coreconfigitem('merge', 'followcopies',
822 default=True,
834 default=True,
823 )
835 )
824 coreconfigitem('merge', 'on-failure',
836 coreconfigitem('merge', 'on-failure',
825 default='continue',
837 default='continue',
826 )
838 )
827 coreconfigitem('merge', 'preferancestor',
839 coreconfigitem('merge', 'preferancestor',
828 default=lambda: ['*'],
840 default=lambda: ['*'],
841 experimental=True,
829 )
842 )
830 coreconfigitem('merge', 'strict-capability-check',
843 coreconfigitem('merge', 'strict-capability-check',
831 default=False,
844 default=False,
832 )
845 )
833 coreconfigitem('merge-tools', '.*',
846 coreconfigitem('merge-tools', '.*',
834 default=None,
847 default=None,
835 generic=True,
848 generic=True,
836 )
849 )
837 coreconfigitem('merge-tools', br'.*\.args$',
850 coreconfigitem('merge-tools', br'.*\.args$',
838 default="$local $base $other",
851 default="$local $base $other",
839 generic=True,
852 generic=True,
840 priority=-1,
853 priority=-1,
841 )
854 )
842 coreconfigitem('merge-tools', br'.*\.binary$',
855 coreconfigitem('merge-tools', br'.*\.binary$',
843 default=False,
856 default=False,
844 generic=True,
857 generic=True,
845 priority=-1,
858 priority=-1,
846 )
859 )
847 coreconfigitem('merge-tools', br'.*\.check$',
860 coreconfigitem('merge-tools', br'.*\.check$',
848 default=list,
861 default=list,
849 generic=True,
862 generic=True,
850 priority=-1,
863 priority=-1,
851 )
864 )
852 coreconfigitem('merge-tools', br'.*\.checkchanged$',
865 coreconfigitem('merge-tools', br'.*\.checkchanged$',
853 default=False,
866 default=False,
854 generic=True,
867 generic=True,
855 priority=-1,
868 priority=-1,
856 )
869 )
857 coreconfigitem('merge-tools', br'.*\.executable$',
870 coreconfigitem('merge-tools', br'.*\.executable$',
858 default=dynamicdefault,
871 default=dynamicdefault,
859 generic=True,
872 generic=True,
860 priority=-1,
873 priority=-1,
861 )
874 )
862 coreconfigitem('merge-tools', br'.*\.fixeol$',
875 coreconfigitem('merge-tools', br'.*\.fixeol$',
863 default=False,
876 default=False,
864 generic=True,
877 generic=True,
865 priority=-1,
878 priority=-1,
866 )
879 )
867 coreconfigitem('merge-tools', br'.*\.gui$',
880 coreconfigitem('merge-tools', br'.*\.gui$',
868 default=False,
881 default=False,
869 generic=True,
882 generic=True,
870 priority=-1,
883 priority=-1,
871 )
884 )
872 coreconfigitem('merge-tools', br'.*\.mergemarkers$',
885 coreconfigitem('merge-tools', br'.*\.mergemarkers$',
873 default='basic',
886 default='basic',
874 generic=True,
887 generic=True,
875 priority=-1,
888 priority=-1,
876 )
889 )
877 coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
890 coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
878 default=dynamicdefault, # take from ui.mergemarkertemplate
891 default=dynamicdefault, # take from ui.mergemarkertemplate
879 generic=True,
892 generic=True,
880 priority=-1,
893 priority=-1,
881 )
894 )
882 coreconfigitem('merge-tools', br'.*\.priority$',
895 coreconfigitem('merge-tools', br'.*\.priority$',
883 default=0,
896 default=0,
884 generic=True,
897 generic=True,
885 priority=-1,
898 priority=-1,
886 )
899 )
887 coreconfigitem('merge-tools', br'.*\.premerge$',
900 coreconfigitem('merge-tools', br'.*\.premerge$',
888 default=dynamicdefault,
901 default=dynamicdefault,
889 generic=True,
902 generic=True,
890 priority=-1,
903 priority=-1,
891 )
904 )
892 coreconfigitem('merge-tools', br'.*\.symlink$',
905 coreconfigitem('merge-tools', br'.*\.symlink$',
893 default=False,
906 default=False,
894 generic=True,
907 generic=True,
895 priority=-1,
908 priority=-1,
896 )
909 )
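A note on the priority=-1 threaded through the merge-tools patterns above: itemregister.get sorts generics by (priority, name) ascending, so these specific patterns are consulted before the catch-all '.*' (default priority 0). A sketch of the ordering:

patterns = [('.*', 0), (r'.*\.args$', -1)]
ordered = sorted(patterns, key=lambda p: (p[1], p[0]))
assert ordered[0][0] == r'.*\.args$'   # checked first for 'mytool.args'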
897 coreconfigitem('pager', 'attend-.*',
910 coreconfigitem('pager', 'attend-.*',
898 default=dynamicdefault,
911 default=dynamicdefault,
899 generic=True,
912 generic=True,
900 )
913 )
901 coreconfigitem('pager', 'ignore',
914 coreconfigitem('pager', 'ignore',
902 default=list,
915 default=list,
903 )
916 )
904 coreconfigitem('pager', 'pager',
917 coreconfigitem('pager', 'pager',
905 default=dynamicdefault,
918 default=dynamicdefault,
906 )
919 )
907 coreconfigitem('patch', 'eol',
920 coreconfigitem('patch', 'eol',
908 default='strict',
921 default='strict',
909 )
922 )
910 coreconfigitem('patch', 'fuzz',
923 coreconfigitem('patch', 'fuzz',
911 default=2,
924 default=2,
912 )
925 )
913 coreconfigitem('paths', 'default',
926 coreconfigitem('paths', 'default',
914 default=None,
927 default=None,
915 )
928 )
916 coreconfigitem('paths', 'default-push',
929 coreconfigitem('paths', 'default-push',
917 default=None,
930 default=None,
918 )
931 )
919 coreconfigitem('paths', '.*',
932 coreconfigitem('paths', '.*',
920 default=None,
933 default=None,
921 generic=True,
934 generic=True,
922 )
935 )
923 coreconfigitem('phases', 'checksubrepos',
936 coreconfigitem('phases', 'checksubrepos',
924 default='follow',
937 default='follow',
925 )
938 )
926 coreconfigitem('phases', 'new-commit',
939 coreconfigitem('phases', 'new-commit',
927 default='draft',
940 default='draft',
928 )
941 )
929 coreconfigitem('phases', 'publish',
942 coreconfigitem('phases', 'publish',
930 default=True,
943 default=True,
931 )
944 )
932 coreconfigitem('profiling', 'enabled',
945 coreconfigitem('profiling', 'enabled',
933 default=False,
946 default=False,
934 )
947 )
935 coreconfigitem('profiling', 'format',
948 coreconfigitem('profiling', 'format',
936 default='text',
949 default='text',
937 )
950 )
938 coreconfigitem('profiling', 'freq',
951 coreconfigitem('profiling', 'freq',
939 default=1000,
952 default=1000,
940 )
953 )
941 coreconfigitem('profiling', 'limit',
954 coreconfigitem('profiling', 'limit',
942 default=30,
955 default=30,
943 )
956 )
944 coreconfigitem('profiling', 'nested',
957 coreconfigitem('profiling', 'nested',
945 default=0,
958 default=0,
946 )
959 )
947 coreconfigitem('profiling', 'output',
960 coreconfigitem('profiling', 'output',
948 default=None,
961 default=None,
949 )
962 )
950 coreconfigitem('profiling', 'showmax',
963 coreconfigitem('profiling', 'showmax',
951 default=0.999,
964 default=0.999,
952 )
965 )
953 coreconfigitem('profiling', 'showmin',
966 coreconfigitem('profiling', 'showmin',
954 default=dynamicdefault,
967 default=dynamicdefault,
955 )
968 )
956 coreconfigitem('profiling', 'showtime',
969 coreconfigitem('profiling', 'showtime',
957 default=True,
970 default=True,
958 )
971 )
959 coreconfigitem('profiling', 'sort',
972 coreconfigitem('profiling', 'sort',
960 default='inlinetime',
973 default='inlinetime',
961 )
974 )
962 coreconfigitem('profiling', 'statformat',
975 coreconfigitem('profiling', 'statformat',
963 default='hotpath',
976 default='hotpath',
964 )
977 )
965 coreconfigitem('profiling', 'time-track',
978 coreconfigitem('profiling', 'time-track',
966 default=dynamicdefault,
979 default=dynamicdefault,
967 )
980 )
968 coreconfigitem('profiling', 'type',
981 coreconfigitem('profiling', 'type',
969 default='stat',
982 default='stat',
970 )
983 )
971 coreconfigitem('progress', 'assume-tty',
984 coreconfigitem('progress', 'assume-tty',
972 default=False,
985 default=False,
973 )
986 )
974 coreconfigitem('progress', 'changedelay',
987 coreconfigitem('progress', 'changedelay',
975 default=1,
988 default=1,
976 )
989 )
977 coreconfigitem('progress', 'clear-complete',
990 coreconfigitem('progress', 'clear-complete',
978 default=True,
991 default=True,
979 )
992 )
980 coreconfigitem('progress', 'debug',
993 coreconfigitem('progress', 'debug',
981 default=False,
994 default=False,
982 )
995 )
983 coreconfigitem('progress', 'delay',
996 coreconfigitem('progress', 'delay',
984 default=3,
997 default=3,
985 )
998 )
986 coreconfigitem('progress', 'disable',
999 coreconfigitem('progress', 'disable',
987 default=False,
1000 default=False,
988 )
1001 )
989 coreconfigitem('progress', 'estimateinterval',
1002 coreconfigitem('progress', 'estimateinterval',
990 default=60.0,
1003 default=60.0,
991 )
1004 )
992 coreconfigitem('progress', 'format',
1005 coreconfigitem('progress', 'format',
993 default=lambda: ['topic', 'bar', 'number', 'estimate'],
1006 default=lambda: ['topic', 'bar', 'number', 'estimate'],
994 )
1007 )
995 coreconfigitem('progress', 'refresh',
1008 coreconfigitem('progress', 'refresh',
996 default=0.1,
1009 default=0.1,
997 )
1010 )
998 coreconfigitem('progress', 'width',
1011 coreconfigitem('progress', 'width',
999 default=dynamicdefault,
1012 default=dynamicdefault,
1000 )
1013 )
1001 coreconfigitem('push', 'pushvars.server',
1014 coreconfigitem('push', 'pushvars.server',
1002 default=False,
1015 default=False,
1003 )
1016 )
1004 coreconfigitem('rewrite', 'backup-bundle',
1017 coreconfigitem('rewrite', 'backup-bundle',
1005 default=True,
1018 default=True,
1006 alias=[('ui', 'history-editing-backup')],
1019 alias=[('ui', 'history-editing-backup')],
1007 )
1020 )
1008 coreconfigitem('rewrite', 'update-timestamp',
1021 coreconfigitem('rewrite', 'update-timestamp',
1009 default=False,
1022 default=False,
1010 )
1023 )
1011 coreconfigitem('storage', 'new-repo-backend',
1024 coreconfigitem('storage', 'new-repo-backend',
1012 default='revlogv1',
1025 default='revlogv1',
1026 experimental=True,
1013 )
1027 )
1014 coreconfigitem('storage', 'revlog.optimize-delta-parent-choice',
1028 coreconfigitem('storage', 'revlog.optimize-delta-parent-choice',
1015 default=True,
1029 default=True,
1016 alias=[('format', 'aggressivemergedeltas')],
1030 alias=[('format', 'aggressivemergedeltas')],
1017 )
1031 )
1018 coreconfigitem('storage', 'revlog.reuse-external-delta',
1032 coreconfigitem('storage', 'revlog.reuse-external-delta',
1019 default=True,
1033 default=True,
1020 )
1034 )
1021 coreconfigitem('storage', 'revlog.reuse-external-delta-parent',
1035 coreconfigitem('storage', 'revlog.reuse-external-delta-parent',
1022 default=None,
1036 default=None,
1023 )
1037 )
1024 coreconfigitem('storage', 'revlog.zlib.level',
1038 coreconfigitem('storage', 'revlog.zlib.level',
1025 default=None,
1039 default=None,
1026 )
1040 )
1027 coreconfigitem('storage', 'revlog.zstd.level',
1041 coreconfigitem('storage', 'revlog.zstd.level',
1028 default=None,
1042 default=None,
1029 )
1043 )
1030 coreconfigitem('server', 'bookmarks-pushkey-compat',
1044 coreconfigitem('server', 'bookmarks-pushkey-compat',
1031 default=True,
1045 default=True,
1032 )
1046 )
1033 coreconfigitem('server', 'bundle1',
1047 coreconfigitem('server', 'bundle1',
1034 default=True,
1048 default=True,
1035 )
1049 )
1036 coreconfigitem('server', 'bundle1gd',
1050 coreconfigitem('server', 'bundle1gd',
1037 default=None,
1051 default=None,
1038 )
1052 )
1039 coreconfigitem('server', 'bundle1.pull',
1053 coreconfigitem('server', 'bundle1.pull',
1040 default=None,
1054 default=None,
1041 )
1055 )
1042 coreconfigitem('server', 'bundle1gd.pull',
1056 coreconfigitem('server', 'bundle1gd.pull',
1043 default=None,
1057 default=None,
1044 )
1058 )
1045 coreconfigitem('server', 'bundle1.push',
1059 coreconfigitem('server', 'bundle1.push',
1046 default=None,
1060 default=None,
1047 )
1061 )
1048 coreconfigitem('server', 'bundle1gd.push',
1062 coreconfigitem('server', 'bundle1gd.push',
1049 default=None,
1063 default=None,
1050 )
1064 )
1051 coreconfigitem('server', 'bundle2.stream',
1065 coreconfigitem('server', 'bundle2.stream',
1052 default=True,
1066 default=True,
1053 alias=[('experimental', 'bundle2.stream')]
1067 alias=[('experimental', 'bundle2.stream')]
1054 )
1068 )
1055 coreconfigitem('server', 'compressionengines',
1069 coreconfigitem('server', 'compressionengines',
1056 default=list,
1070 default=list,
1057 )
1071 )
1058 coreconfigitem('server', 'concurrent-push-mode',
1072 coreconfigitem('server', 'concurrent-push-mode',
1059 default='strict',
1073 default='strict',
1060 )
1074 )
1061 coreconfigitem('server', 'disablefullbundle',
1075 coreconfigitem('server', 'disablefullbundle',
1062 default=False,
1076 default=False,
1063 )
1077 )
1064 coreconfigitem('server', 'maxhttpheaderlen',
1078 coreconfigitem('server', 'maxhttpheaderlen',
1065 default=1024,
1079 default=1024,
1066 )
1080 )
1067 coreconfigitem('server', 'pullbundle',
1081 coreconfigitem('server', 'pullbundle',
1068 default=False,
1082 default=False,
1069 )
1083 )
1070 coreconfigitem('server', 'preferuncompressed',
1084 coreconfigitem('server', 'preferuncompressed',
1071 default=False,
1085 default=False,
1072 )
1086 )
1073 coreconfigitem('server', 'streamunbundle',
1087 coreconfigitem('server', 'streamunbundle',
1074 default=False,
1088 default=False,
1075 )
1089 )
1076 coreconfigitem('server', 'uncompressed',
1090 coreconfigitem('server', 'uncompressed',
1077 default=True,
1091 default=True,
1078 )
1092 )
1079 coreconfigitem('server', 'uncompressedallowsecret',
1093 coreconfigitem('server', 'uncompressedallowsecret',
1080 default=False,
1094 default=False,
1081 )
1095 )
1082 coreconfigitem('server', 'view',
1096 coreconfigitem('server', 'view',
1083 default='served',
1097 default='served',
1084 )
1098 )
1085 coreconfigitem('server', 'validate',
1099 coreconfigitem('server', 'validate',
1086 default=False,
1100 default=False,
1087 )
1101 )
1088 coreconfigitem('server', 'zliblevel',
1102 coreconfigitem('server', 'zliblevel',
1089 default=-1,
1103 default=-1,
1090 )
1104 )
1091 coreconfigitem('server', 'zstdlevel',
1105 coreconfigitem('server', 'zstdlevel',
1092 default=3,
1106 default=3,
1093 )
1107 )
1094 coreconfigitem('share', 'pool',
1108 coreconfigitem('share', 'pool',
1095 default=None,
1109 default=None,
1096 )
1110 )
1097 coreconfigitem('share', 'poolnaming',
1111 coreconfigitem('share', 'poolnaming',
1098 default='identity',
1112 default='identity',
1099 )
1113 )
1100 coreconfigitem('shelve', 'maxbackups',
1114 coreconfigitem('shelve', 'maxbackups',
1101 default=10,
1115 default=10,
1102 )
1116 )
1103 coreconfigitem('smtp', 'host',
1117 coreconfigitem('smtp', 'host',
1104 default=None,
1118 default=None,
1105 )
1119 )
1106 coreconfigitem('smtp', 'local_hostname',
1120 coreconfigitem('smtp', 'local_hostname',
1107 default=None,
1121 default=None,
1108 )
1122 )
1109 coreconfigitem('smtp', 'password',
1123 coreconfigitem('smtp', 'password',
1110 default=None,
1124 default=None,
1111 )
1125 )
1112 coreconfigitem('smtp', 'port',
1126 coreconfigitem('smtp', 'port',
1113 default=dynamicdefault,
1127 default=dynamicdefault,
1114 )
1128 )
1115 coreconfigitem('smtp', 'tls',
1129 coreconfigitem('smtp', 'tls',
1116 default='none',
1130 default='none',
1117 )
1131 )
1118 coreconfigitem('smtp', 'username',
1132 coreconfigitem('smtp', 'username',
1119 default=None,
1133 default=None,
1120 )
1134 )
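# Illustration (not part of the original file): how the [smtp] items above
# map onto a user's hgrc. The host, port, and username values below are
# hypothetical; any key left unset falls back to the registered default
# (e.g. smtp.tls -> 'none').
#
#   [smtp]
#   host = mail.example.com
#   port = 587
#   tls = starttls
#   username = jane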
coreconfigitem('sparse', 'missingwarning',
    default=True,
    experimental=True,
)
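# Illustration (an assumption, not code from this patch): `experimental=True`
# above uses the new registrar argument this change introduces. A minimal
# sketch of a registrar item carrying such a flag could look like:
#
#   class configitem(object):
#       def __init__(self, section, name, default=None, alias=(),
#                    generic=False, priority=0, experimental=False):
#           self.section = section
#           self.name = name
#           self.default = default
#           self.alias = list(alias)
#           self.generic = generic
#           self.priority = priority
#           self.experimental = experimental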
coreconfigitem('subrepos', 'allowed',
    default=dynamicdefault,  # to make backporting simpler
)
coreconfigitem('subrepos', 'hg:allowed',
    default=dynamicdefault,
)
coreconfigitem('subrepos', 'git:allowed',
    default=dynamicdefault,
)
coreconfigitem('subrepos', 'svn:allowed',
    default=dynamicdefault,
)
coreconfigitem('templates', '.*',
    default=None,
    generic=True,
)
coreconfigitem('templateconfig', '.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('trusted', 'groups',
    default=list,
)
coreconfigitem('trusted', 'users',
    default=list,
)
coreconfigitem('ui', '_usedassubrepo',
    default=False,
)
coreconfigitem('ui', 'allowemptycommit',
    default=False,
)
coreconfigitem('ui', 'archivemeta',
    default=True,
)
coreconfigitem('ui', 'askusername',
    default=False,
)
coreconfigitem('ui', 'clonebundlefallback',
    default=False,
)
coreconfigitem('ui', 'clonebundleprefers',
    default=list,
)
coreconfigitem('ui', 'clonebundles',
    default=True,
)
coreconfigitem('ui', 'color',
    default='auto',
)
coreconfigitem('ui', 'commitsubrepos',
    default=False,
)
coreconfigitem('ui', 'debug',
    default=False,
)
coreconfigitem('ui', 'debugger',
    default=None,
)
coreconfigitem('ui', 'editor',
    default=dynamicdefault,
)
coreconfigitem('ui', 'fallbackencoding',
    default=None,
)
coreconfigitem('ui', 'forcecwd',
    default=None,
)
coreconfigitem('ui', 'forcemerge',
    default=None,
)
coreconfigitem('ui', 'formatdebug',
    default=False,
)
coreconfigitem('ui', 'formatjson',
    default=False,
)
coreconfigitem('ui', 'formatted',
    default=None,
)
coreconfigitem('ui', 'graphnodetemplate',
    default=None,
)
coreconfigitem('ui', 'interactive',
    default=None,
)
coreconfigitem('ui', 'interface',
    default=None,
)
coreconfigitem('ui', 'interface.chunkselector',
    default=None,
)
coreconfigitem('ui', 'large-file-limit',
    default=10000000,
)
coreconfigitem('ui', 'logblockedtimes',
    default=False,
)
coreconfigitem('ui', 'logtemplate',
    default=None,
)
coreconfigitem('ui', 'merge',
    default=None,
)
coreconfigitem('ui', 'mergemarkers',
    default='basic',
)
coreconfigitem('ui', 'mergemarkertemplate',
    default=('{node|short} '
             '{ifeq(tags, "tip", "", '
             'ifeq(tags, "", "", "{tags} "))}'
             '{if(bookmarks, "{bookmarks} ")}'
             '{ifeq(branch, "default", "", "{branch} ")}'
             '- {author|user}: {desc|firstline}')
)
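# Illustration (hypothetical output): with the default template above, the
# label on a conflict marker could render along the lines of:
#
#   3a2b4c5d feature-branch - alice: teach frobnicate about widgets
#
# i.e. short node, then tags/bookmarks/branch when present ('tip' and
# 'default' are elided), then author and first line of the description.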
coreconfigitem('ui', 'message-output',
    default='stdio',
)
coreconfigitem('ui', 'nontty',
    default=False,
)
coreconfigitem('ui', 'origbackuppath',
    default=None,
)
coreconfigitem('ui', 'paginate',
    default=True,
)
coreconfigitem('ui', 'patch',
    default=None,
)
coreconfigitem('ui', 'pre-merge-tool-output-template',
    default=None,
)
coreconfigitem('ui', 'portablefilenames',
    default='warn',
)
coreconfigitem('ui', 'promptecho',
    default=False,
)
coreconfigitem('ui', 'quiet',
    default=False,
)
coreconfigitem('ui', 'quietbookmarkmove',
    default=False,
)
coreconfigitem('ui', 'relative-paths',
    default='legacy',
)
coreconfigitem('ui', 'remotecmd',
    default='hg',
)
coreconfigitem('ui', 'report_untrusted',
    default=True,
)
coreconfigitem('ui', 'rollback',
    default=True,
)
coreconfigitem('ui', 'signal-safe-lock',
    default=True,
)
coreconfigitem('ui', 'slash',
    default=False,
)
coreconfigitem('ui', 'ssh',
    default='ssh',
)
coreconfigitem('ui', 'ssherrorhint',
    default=None,
)
coreconfigitem('ui', 'statuscopies',
    default=False,
)
coreconfigitem('ui', 'strict',
    default=False,
)
coreconfigitem('ui', 'style',
    default='',
)
coreconfigitem('ui', 'supportcontact',
    default=None,
)
coreconfigitem('ui', 'textwidth',
    default=78,
)
coreconfigitem('ui', 'timeout',
    default='600',
)
coreconfigitem('ui', 'timeout.warn',
    default=0,
)
coreconfigitem('ui', 'traceback',
    default=False,
)
coreconfigitem('ui', 'tweakdefaults',
    default=False,
)
coreconfigitem('ui', 'username',
    alias=[('ui', 'user')]
)
coreconfigitem('ui', 'verbose',
    default=False,
)
coreconfigitem('verify', 'skipflags',
    default=None,
)
coreconfigitem('web', 'allowbz2',
    default=False,
)
coreconfigitem('web', 'allowgz',
    default=False,
)
coreconfigitem('web', 'allow-pull',
    alias=[('web', 'allowpull')],
    default=True,
)
coreconfigitem('web', 'allow-push',
    alias=[('web', 'allow_push')],
    default=list,
)
coreconfigitem('web', 'allowzip',
    default=False,
)
coreconfigitem('web', 'archivesubrepos',
    default=False,
)
coreconfigitem('web', 'cache',
    default=True,
)
coreconfigitem('web', 'comparisoncontext',
    default=5,
)
coreconfigitem('web', 'contact',
    default=None,
)
coreconfigitem('web', 'deny_push',
    default=list,
)
coreconfigitem('web', 'guessmime',
    default=False,
)
coreconfigitem('web', 'hidden',
    default=False,
)
coreconfigitem('web', 'labels',
    default=list,
)
coreconfigitem('web', 'logoimg',
    default='hglogo.png',
)
coreconfigitem('web', 'logourl',
    default='https://mercurial-scm.org/',
)
coreconfigitem('web', 'accesslog',
    default='-',
)
coreconfigitem('web', 'address',
    default='',
)
coreconfigitem('web', 'allow-archive',
    alias=[('web', 'allow_archive')],
    default=list,
)
coreconfigitem('web', 'allow_read',
    default=list,
)
coreconfigitem('web', 'baseurl',
    default=None,
)
coreconfigitem('web', 'cacerts',
    default=None,
)
coreconfigitem('web', 'certificate',
    default=None,
)
coreconfigitem('web', 'collapse',
    default=False,
)
coreconfigitem('web', 'csp',
    default=None,
)
coreconfigitem('web', 'deny_read',
    default=list,
)
coreconfigitem('web', 'descend',
    default=True,
)
coreconfigitem('web', 'description',
    default="",
)
coreconfigitem('web', 'encoding',
    default=lambda: encoding.encoding,
)
coreconfigitem('web', 'errorlog',
    default='-',
)
coreconfigitem('web', 'ipv6',
    default=False,
)
coreconfigitem('web', 'maxchanges',
    default=10,
)
coreconfigitem('web', 'maxfiles',
    default=10,
)
coreconfigitem('web', 'maxshortchanges',
    default=60,
)
coreconfigitem('web', 'motd',
    default='',
)
coreconfigitem('web', 'name',
    default=dynamicdefault,
)
coreconfigitem('web', 'port',
    default=8000,
)
coreconfigitem('web', 'prefix',
    default='',
)
coreconfigitem('web', 'push_ssl',
    default=True,
)
coreconfigitem('web', 'refreshinterval',
    default=20,
)
coreconfigitem('web', 'server-header',
    default=None,
)
coreconfigitem('web', 'static',
    default=None,
)
coreconfigitem('web', 'staticurl',
    default=None,
)
coreconfigitem('web', 'stripes',
    default=1,
)
coreconfigitem('web', 'style',
    default='paper',
)
coreconfigitem('web', 'templates',
    default=None,
)
coreconfigitem('web', 'view',
    default='served',
    experimental=True,
)
coreconfigitem('worker', 'backgroundclose',
    default=dynamicdefault,
)
# Windows defaults to a limit of 512 open files. A buffer of 128
# should give us enough headway.
coreconfigitem('worker', 'backgroundclosemaxqueue',
    default=384,
)
coreconfigitem('worker', 'backgroundcloseminfilecount',
    default=2048,
)
coreconfigitem('worker', 'backgroundclosethreadcount',
    default=4,
)
coreconfigitem('worker', 'enabled',
    default=True,
)
coreconfigitem('worker', 'numcpus',
    default=None,
)
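# Illustration (not part of the original change): how items registered above
# are read back at runtime. A minimal sketch assuming a `ui` object such as
# the one handed to every command; the helper name is hypothetical.
def _exampleworkersettings(ui):
    # configbool()/configint() coerce raw hgrc strings and fall back to the
    # defaults registered above when the user sets nothing.
    enabled = ui.configbool('worker', 'enabled')   # True by default
    numcpus = ui.configint('worker', 'numcpus')    # None by default
    return enabled, numcpus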
# Rebase-related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the rebase extension to
# reuse some bits without formally loading it.
coreconfigitem('commands', 'rebase.requiredest',
    default=False,
)
coreconfigitem('experimental', 'rebaseskipobsolete',
    default=True,
)
coreconfigitem('rebase', 'singletransaction',
    default=False,
)
coreconfigitem('rebase', 'experimental.inmemory',
    default=False,
)
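# Illustration (an assumption): how an alias declared above resolves. Reading
# a registered name falls back to its legacy spelling, so old hgrc files keep
# working. With only the hypothetical setting `user = Jane Doe` in [ui]:
#
#   ui.config('ui', 'username')   # -> 'Jane Doe', found via ('ui', 'user')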