branching: merge stable into default
marmoute - r51886:12c308c5 merge default
@@ -1,4449 +1,4569 @@
# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median, and average. If not set, only the best timing is reported
  (default: off).

``presleep``
  number of seconds to wait before any group of runs (default: 1)

``pre-run``
  number of runs to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic:

  If the benchmark has been running for <time> seconds, and we have performed
  <numberofrun> iterations, stop the benchmark.

  The default value is: `3.0-100, 10.0-3`

``stub``
  When set, benchmarks will only be run once, useful for testing
  (default: off)
'''
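
# Example configuration (illustrative): `presleep` and `run-limits` are shown
# at their documented defaults, and `all-timing` is switched on so that best,
# worst, median, and average are all reported:
#
#   [perf]
#   all-timing = yes
#   presleep = 1
#   run-limits = 3.0-100, 10.0-3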

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial

import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time

import mercurial.revlog
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap  # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete  # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar  # since 3.7 (or 37d50250b696)

    dir(registrar)  # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview  # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial.utils import repoviewutil  # since 5.0
except ImportError:
    repoviewutil = None
try:
    from mercurial import scmutil  # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery  # since 1.9 (or cb98fed52495)
except ImportError:
    pass

try:
    from mercurial import profiling
except ImportError:
    profiling = None

try:
    from mercurial.revlogutils import constants as revlog_constants

    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, *args, **kwargs)


def identity(a):
    return a


try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue

try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()


def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

cmdtable = {}


# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")


if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
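
# Usage sketch for the registration shim above (hypothetical command name,
# shown for illustration only; the real perf commands below follow the same
# pattern):
#
#   @command(b'perf::example|perfexample', formatteropts)
#   def perfexample(ui, repo, **opts):
#       timer, fm = gettimer(ui, _byteskwargs(opts))
#       timer(lambda: len(repo.changelog))
#       fm.end()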


try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )


def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len


class noop:
    """dummy context manager"""

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


NOOPCTX = noop()


def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", True)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm


def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()


@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
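
# Minimal usage sketch for timeone() (illustrative): the context manager
# yields a list and appends a single (wallclock, user, sys) sample to it
# when the block exits:
#
#   with timeone() as sample:
#       run_benchmarked_code()  # hypothetical workload
#   wall, user_time, sys_time = sample[0]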


# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)


@contextlib.contextmanager
def noop_context():
    yield


def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as item:
                    r = func()
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)


def formatone(fm, timings, title=None, result=None, displayall=False):
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
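
# Shape of the plain output produced by the display() calls above when
# perf.all-timing is enabled (numbers are made up):
#
#   ! wall 0.001234 comb 0.020000 user 0.010000 sys 0.010000 (best of 100)
#   ! wall 0.003456 comb 0.030000 user 0.020000 sys 0.010000 (max of 100)
#   ! wall 0.001500 comb 0.020000 user 0.010000 sys 0.010000 (avg of 100)
#   ! wall 0.001400 comb 0.020000 user 0.010000 sys 0.010000 (median of 100)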


# utilities for historical portability


def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, v)
        )


def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has the 'name' attribute before a subsequent setattr

    This function aborts if 'obj' doesn't have the 'name' attribute at
    runtime. This avoids overlooking a future removal of the attribute,
    which would silently break the assumptions of performance measurement.

    This function returns an object that can (1) assign a new value to,
    and (2) restore the original value of, the attribute.

    If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
    an abort, and this function returns None. This is useful to examine
    an attribute which isn't ensured in all Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
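
# Usage sketch (illustrative), mirroring how gettimer() redirects output:
#
#   uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
#   if uifout:
#       uifout.set(ui.ferr)  # assign a new value
#       ...                  # run the measurement
#       uifout.restore()     # put the original value back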


# utilities to examine each internal API change


def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )


def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')


def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')


def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # the correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")


# utilities to clear cache


def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)


def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')


# perf commands


@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()


@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()


@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked files is requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            with dirstate.running_status(repo):
                timer(status_dirstate)
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
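
# Example invocations (illustrative):
#
#   $ hg perf::status                 # status of tracked files only
#   $ hg perf::status --unknown       # also look for unknown files
#   $ hg perf::status --dirstate      # benchmark the internal dirstate call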


@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()


def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None


@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()


def _default_clear_on_disk_tags_cache(repo):
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._filename(repo))


def _default_clear_on_disk_tags_fnodes_cache(repo):
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._fnodescachefile)


def _default_forget_fnodes(repo, revs):
    """function used by the perf extension to prune some entries from the
    fnodes cache"""
    from mercurial import tags

    missing_1 = b'\xff' * 4
    missing_2 = b'\xff' * 20
    cache = tags.hgtagsfnodescache(repo.unfiltered())
    for r in revs:
        cache._writeentry(r * tags._fnodesrecsize, missing_1, missing_2)
    cache.write()


@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        (
            b'',
            b'clear-on-disk-cache',
            False,
            b'clear on disk tags cache (DESTRUCTIVE)',
        ),
        (
            b'',
            b'clear-fnode-cache-all',
            False,
            b'clear on disk file node cache (DESTRUCTIVE)',
        ),
        (
            b'',
            b'clear-fnode-cache-rev',
            [],
            b'clear on disk file node cache (DESTRUCTIVE)',
            b'REVS',
        ),
        (
            b'',
            b'update-last',
            b'',
            b'simulate an update over the last N revisions (DESTRUCTIVE)',
            b'N',
        ),
    ],
)
def perftags(ui, repo, **opts):
    """Benchmark tags retrieval in various situations

    Options marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
    altering performance after the command was run. However, they do not
    destroy any stored data.
    """
    from mercurial import tags

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    clear_disk = opts[b'clear_on_disk_cache']
    clear_fnode = opts[b'clear_fnode_cache_all']

    clear_fnode_revs = opts[b'clear_fnode_cache_rev']
    update_last_str = opts[b'update_last']
    update_last = None
    if update_last_str:
        try:
            update_last = int(update_last_str)
        except ValueError:
            msg = b'could not parse value for update-last: "%s"'
            msg %= update_last_str
            hint = b'value should be an integer'
            raise error.Abort(msg, hint=hint)

    clear_disk_fn = getattr(
        tags,
        "clear_cache_on_disk",
        _default_clear_on_disk_tags_cache,
    )
    clear_fnodes_fn = getattr(
        tags,
        "clear_cache_fnodes",
        _default_clear_on_disk_tags_fnodes_cache,
    )
    clear_fnodes_rev_fn = getattr(
        tags,
        "forget_fnodes",
        _default_forget_fnodes,
    )

    clear_revs = []
    if clear_fnode_revs:
        clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))

    if update_last:
        revset = b'last(all(), %d)' % update_last
        last_revs = repo.unfiltered().revs(revset)
        clear_revs.extend(last_revs)

        from mercurial import repoview

        rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
        with repo.ui.configoverride(rev_filter, source=b"perf"):
            filter_id = repoview.extrafilter(repo.ui)

        filter_name = b'%s%%%s' % (repo.filtername, filter_id)
        pre_repo = repo.filtered(filter_name)
        pre_repo.tags()  # warm the cache
        old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
        new_tags_path = repo.cachevfs.join(tags._filename(repo))

    clear_revs = sorted(set(clear_revs))

    def s():
        if update_last:
            util.copyfile(old_tags_path, new_tags_path)
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        if clear_disk:
            clear_disk_fn(repo)
        if clear_fnode:
            clear_fnodes_fn(repo)
        elif clear_revs:
            clear_fnodes_rev_fn(repo, clear_revs)
        repocleartagscache()

    def t():
        len(repo.tags())

    timer(t, setup=s)
    fm.end()
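
# Example invocations of the cache-clearing knobs above (illustrative; the
# DESTRUCTIVE options rewrite on-disk caches, not repository data):
#
#   $ hg perf::tags --clear-revlogs
#   $ hg perf::tags --clear-on-disk-cache
#   $ hg perf::tags --clear-fnode-cache-rev 'last(all(), 100)'
#   $ hg perf::tags --update-last 10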


@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()


@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s

    timer(d)
    fm.end()


@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
1004
1124
1005
1125
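# Example invocation (illustrative path; any pull-able peer works):
#
#   $ hg perf::discovery ../some-other-clone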
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()


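# Example invocation:
#
#   $ hg perf::bookmarks --clear-revlogs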
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=s)
    fm.end()


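# Example invocation (illustrative revset; only "none" compression is
# currently supported, so use an uncompressed bundlespec such as none-v2):
#
#   $ hg perf::bundle -r 'last(100)' -t none-v2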
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()


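# Example invocation (illustrative path to a bundle previously created with
# `hg bundle`):
#
#   $ hg perf::bundleread ../changesets.hg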
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()


@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(d)
    fm.end()


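# Example invocations (--iteration and --contains are mutually exclusive):
#
#   $ hg perf::dirstate
#   $ hg perf::dirstate --iteration
#   $ hg perf::dirstate --contains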
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file paths that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo.dirstate.hasdir(b"a")

    def setup():
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds

    def setup():
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    with repo.wlock():
        timer(d, setup=setup)
    fm.end()


def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in the merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)


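# Example invocation (illustrative revision; computes the merge actions
# against the working copy without applying them):
#
#   $ hg perf::mergecalculate -r default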
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()


@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()


@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()


@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()


@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()


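# Example invocations (illustrative revisions; with -m the argument is a
# manifest revision or full manifest node rather than a changeset):
#
#   $ hg perf::manifest tip
#   $ hg perf::manifest -m 4000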
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()


@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()


@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operations related to computing ignored files"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()


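# Example invocations (illustrative revsets; see the docstring below for
# other interesting sets):
#
#   $ hg perf::index
#   $ hg perf::index --rev 0 --rev tip
#   $ hg perf::index --rev '-10000:'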
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not perform revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matter.

    Examples of useful sets to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, check out the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()


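# Example invocation (--rev is required; illustrative revset):
#
#   $ hg perf::nodemap --rev '-10000:' --rev tip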
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revisions from a cold nodemap

    Depending on the implementation, the number and order of revisions we look
    up can vary. Examples of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookups. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()


@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()


def _find_stream_generator(version):
    """find the proper generator function for this stream version"""
    import mercurial.streamclone

    available = {}

    # try to fetch a v1 generator
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:

        def generate(repo):
            # unlike v2/v3, generatev1 only takes the repo as argument
            entries, bytes, data = generatev1(repo)
            return data

        available[b'v1'] = generate
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate(repo):
            entries, bytes, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate(repo):
            entries, bytes, data = generatev3(repo, None, None, True)
            return data

        available[b'v3-exp'] = generate

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        msg = b"unknown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)


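# Example invocation ("latest" resolves to the highest non-experimental
# stream version available in the running Mercurial):
#
#   $ hg perf::stream-locked-section --stream-version v2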
@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2", "v3-exp" or "latest", the default)',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    result_holder = [None]

    def setupone():
        result_holder[0] = None

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration of the initialisation
        result_holder[0] = generate(repo)

    timer(runone, setup=setupone, title=b"load")
    fm.end()


@command(
    b'perf::stream-generate',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2", "v3-exp" or "latest", the default)',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_generate(ui, repo, stream_version, **opts):
    """benchmark the full generation of a stream clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration of the initialisation
        for chunk in generate(repo):
            pass

    timer(runone, title=b"generate")
    fm.end()


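# Example invocation (illustrative path; the argument must be an existing
# stream bundle, e.g. one created with `hg debugcreatestreamclonebundle`):
#
#   $ hg debugcreatestreamclonebundle ../stream.hg
#   $ hg perf::stream-consume ../stream.hg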
2027 @command(
2147 @command(
2028 b'perf::stream-consume',
2148 b'perf::stream-consume',
2029 formatteropts,
2149 formatteropts,
2030 )
2150 )
2031 def perf_stream_clone_consume(ui, repo, filename, **opts):
2151 def perf_stream_clone_consume(ui, repo, filename, **opts):
2032 """benchmark the full application of a stream clone
2152 """benchmark the full application of a stream clone
2033
2153
2034 This include the creation of the repository
2154 This include the creation of the repository
2035 """
2155 """
2036 # try except to appease check code
2156 # try except to appease check code
2037 msg = b"mercurial too old, missing necessary module: %s"
2157 msg = b"mercurial too old, missing necessary module: %s"
2038 try:
2158 try:
2039 from mercurial import bundle2
2159 from mercurial import bundle2
2040 except ImportError as exc:
2160 except ImportError as exc:
2041 msg %= _bytestr(exc)
2161 msg %= _bytestr(exc)
2042 raise error.Abort(msg)
2162 raise error.Abort(msg)
2043 try:
2163 try:
2044 from mercurial import exchange
2164 from mercurial import exchange
2045 except ImportError as exc:
2165 except ImportError as exc:
2046 msg %= _bytestr(exc)
2166 msg %= _bytestr(exc)
2047 raise error.Abort(msg)
2167 raise error.Abort(msg)
2048 try:
2168 try:
2049 from mercurial import hg
2169 from mercurial import hg
2050 except ImportError as exc:
2170 except ImportError as exc:
2051 msg %= _bytestr(exc)
2171 msg %= _bytestr(exc)
2052 raise error.Abort(msg)
2172 raise error.Abort(msg)
2053 try:
2173 try:
2054 from mercurial import localrepo
2174 from mercurial import localrepo
2055 except ImportError as exc:
2175 except ImportError as exc:
2056 msg %= _bytestr(exc)
2176 msg %= _bytestr(exc)
2057 raise error.Abort(msg)
2177 raise error.Abort(msg)
2058
2178
2059 opts = _byteskwargs(opts)
2179 opts = _byteskwargs(opts)
2060 timer, fm = gettimer(ui, opts)
2180 timer, fm = gettimer(ui, opts)
2061
2181
2062 # deletion of the generator may trigger some cleanup that we do not want to
2182 # deletion of the generator may trigger some cleanup that we do not want to
2063 # measure
2183 # measure
2064 if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
2184 if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
2065 raise error.Abort("not a readable file: %s" % filename)
2185 raise error.Abort("not a readable file: %s" % filename)
2066
2186
2067 run_variables = [None, None]
2187 run_variables = [None, None]
2068
2188
2069 @contextlib.contextmanager
2189 @contextlib.contextmanager
2070 def context():
2190 def context():
2071 with open(filename, mode='rb') as bundle:
2191 with open(filename, mode='rb') as bundle:
2072 with tempfile.TemporaryDirectory() as tmp_dir:
2192 with tempfile.TemporaryDirectory() as tmp_dir:
2073 tmp_dir = fsencode(tmp_dir)
2193 tmp_dir = fsencode(tmp_dir)
2074 run_variables[0] = bundle
2194 run_variables[0] = bundle
2075 run_variables[1] = tmp_dir
2195 run_variables[1] = tmp_dir
2076 yield
2196 yield
2077 run_variables[0] = None
2197 run_variables[0] = None
2078 run_variables[1] = None
2198 run_variables[1] = None
2079
2199
2080 def runone():
2200 def runone():
2081 bundle = run_variables[0]
2201 bundle = run_variables[0]
2082 tmp_dir = run_variables[1]
2202 tmp_dir = run_variables[1]
2083 # only pass ui when no srcrepo
2203 # only pass ui when no srcrepo
2084 localrepo.createrepository(
2204 localrepo.createrepository(
2085 repo.ui, tmp_dir, requirements=repo.requirements
2205 repo.ui, tmp_dir, requirements=repo.requirements
2086 )
2206 )
2087 target = hg.repository(repo.ui, tmp_dir)
2207 target = hg.repository(repo.ui, tmp_dir)
2088 gen = exchange.readbundle(target.ui, bundle, bundle.name)
2208 gen = exchange.readbundle(target.ui, bundle, bundle.name)
2089 # stream v1
2209 # stream v1
2090 if util.safehasattr(gen, 'apply'):
2210 if util.safehasattr(gen, 'apply'):
2091 gen.apply(target)
2211 gen.apply(target)
2092 else:
2212 else:
2093 with target.transaction(b"perf::stream-consume") as tr:
2213 with target.transaction(b"perf::stream-consume") as tr:
2094 bundle2.applybundle(
2214 bundle2.applybundle(
2095 target,
2215 target,
2096 gen,
2216 gen,
2097 tr,
2217 tr,
2098 source=b'unbundle',
2218 source=b'unbundle',
2099 url=filename,
2219 url=filename,
2100 )
2220 )
2101
2221
2102 timer(runone, context=context, title=b"consume")
2222 timer(runone, context=context, title=b"consume")
2103 fm.end()
2223 fm.end()
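# Example for perf::stream-consume (illustrative; ``stream.hg`` stands for
# any previously generated stream bundle file):
#
#   $ hg perf::stream-consume stream.hg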


@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for n in nl:
            repo.changelog.parents(n)

    timer(d)
    fm.end()
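# Example for perf::parents (illustrative; lowers N for small repositories,
# using the config option documented in the docstring above):
#
#   $ hg perf::parents --config perf.parentscount=100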


@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def d():
        len(repo[x].files())

    timer(d)
    fm.end()


@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def d():
        len(cl.read(x)[3])

    timer(d)
    fm.end()


@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()

@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
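# Example for perf::linelogedits (illustrative; runs outside any repository
# since the command is registered with norepo=True):
#
#   $ hg perf::linelogedits -n 20000 --max-hunk-lines 5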


@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()


@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        clearcaches(cl)

    timer(d)
    fm.end()

@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()

@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()

@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
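# Example for perf::templating (illustrative template and revset):
#
#   $ hg perf::templating -r '0:1000' '{node|short} {desc|firstline}\n'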


def _displaystats(ui, opts, entries, data):
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make the statistics pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
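# Note on the percentile computation above: ``values`` is sorted ascending,
# so ``values[(nbvalues * P) // 100][0]`` picks the entry at (roughly) the
# P-th percentile. For instance, with 200 collected samples:
#
#   p90 = values[(200 * 90) // 100][0]  # index 180, i.e. the 90th percentile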


@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistics about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command finds (base, p1, p2) triplets relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed files, so let's count them.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revisions covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
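# Example for perf::helper-mergecopies (illustrative; restrict the search to
# merges and collect both timing and statistics):
#
#   $ hg perf::helper-mergecopies -r 'merge()' --timing --stats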


@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistics about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perftracecopies`

    This command finds source-destination pairs relevant for copytracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revisions covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
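# Example for perf::helper-pathcopies (illustrative; same calling convention
# as the merge-copies helper above):
#
#   $ hg perf::helper-pathcopies --timing --stats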


@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()


@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store

    def d():
        s.fncache._load()

    timer(d)
    fm.end()

@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)

    timer(d)
    tr.close()
    lock.release()
    fm.end()


@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()

    def d():
        for p in s.fncache.entries:
            s.encode(p)

    timer(d)
    fm.end()

def _bdiffworker(q, blocks, xdiff, ready, done):
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
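# The worker loop above drains pairs from the queue until it sees a ``None``
# sentinel, then parks on the ``ready`` condition until the driver in
# perfbdiff() calls ``notify_all()`` for the next timed run (or sets ``done``
# to shut the thread down).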


def _manifestrevision(repo, mnode):
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)

@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
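# Example for perf::bdiff (illustrative; time 100 manifest bdiffs starting at
# revision 0, using four worker threads):
#
#   $ hg perf::bdiff -m 0 --count 100 --threads 4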


@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that breaks transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that concludes the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
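# Example for perf::unbundle (illustrative; ``missing.hg`` stands for a
# bundle whose changesets are not already present in the repository, e.g.
# one generated from a more recent clone):
#
#   $ hg perf::unbundle missing.hg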


@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3047
3167
3048
3168
3049 @command(b'perf::diffwd|perfdiffwd', formatteropts)
3169 @command(b'perf::diffwd|perfdiffwd', formatteropts)
3050 def perfdiffwd(ui, repo, **opts):
3170 def perfdiffwd(ui, repo, **opts):
3051 """Profile diff of working directory changes"""
3171 """Profile diff of working directory changes"""
3052 opts = _byteskwargs(opts)
3172 opts = _byteskwargs(opts)
3053 timer, fm = gettimer(ui, opts)
3173 timer, fm = gettimer(ui, opts)
3054 options = {
3174 options = {
3055 'w': 'ignore_all_space',
3175 'w': 'ignore_all_space',
3056 'b': 'ignore_space_change',
3176 'b': 'ignore_space_change',
3057 'B': 'ignore_blank_lines',
3177 'B': 'ignore_blank_lines',
3058 }
3178 }
3059
3179
3060 for diffopt in ('', 'w', 'b', 'B', 'wB'):
3180 for diffopt in ('', 'w', 'b', 'B', 'wB'):
3061 opts = {options[c]: b'1' for c in diffopt}
3181 opts = {options[c]: b'1' for c in diffopt}
3062
3182
3063 def d():
3183 def d():
3064 ui.pushbuffer()
3184 ui.pushbuffer()
3065 commands.diff(ui, repo, **opts)
3185 commands.diff(ui, repo, **opts)
3066 ui.popbuffer()
3186 ui.popbuffer()
3067
3187
3068 diffopt = diffopt.encode('ascii')
3188 diffopt = diffopt.encode('ascii')
3069 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
3189 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
3070 timer(d, title=title)
3190 timer(d, title=title)
3071 fm.end()
3191 fm.end()
3072
3192
3073
3193
3074 @command(
3194 @command(
3075 b'perf::revlogindex|perfrevlogindex',
3195 b'perf::revlogindex|perfrevlogindex',
3076 revlogopts + formatteropts,
3196 revlogopts + formatteropts,
3077 b'-c|-m|FILE',
3197 b'-c|-m|FILE',
3078 )
3198 )
3079 def perfrevlogindex(ui, repo, file_=None, **opts):
3199 def perfrevlogindex(ui, repo, file_=None, **opts):
3080 """Benchmark operations against a revlog index.
3200 """Benchmark operations against a revlog index.
3081
3201
3082 This tests constructing a revlog instance, reading index data,
3202 This tests constructing a revlog instance, reading index data,
3083 parsing index data, and performing various operations related to
3203 parsing index data, and performing various operations related to
3084 index data.
3204 index data.
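
    Example (an illustrative invocation; ``-m`` targets the manifest
    revlog, ``-c`` the changelog, and FILE a filelog):

      $ hg perfrevlogindex -m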
3085 """
3205 """
3086
3206
3087 opts = _byteskwargs(opts)
3207 opts = _byteskwargs(opts)
3088
3208
3089 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
3209 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
3090
3210
3091 opener = getattr(rl, 'opener') # trick linter
3211 opener = getattr(rl, 'opener') # trick linter
3092 # compat with hg <= 5.8
3212 # compat with hg <= 5.8
3093 radix = getattr(rl, 'radix', None)
3213 radix = getattr(rl, 'radix', None)
3094 indexfile = getattr(rl, '_indexfile', None)
3214 indexfile = getattr(rl, '_indexfile', None)
3095 if indexfile is None:
3215 if indexfile is None:
3096 # compatibility with <= hg-5.8
3216 # compatibility with <= hg-5.8
3097 indexfile = getattr(rl, 'indexfile')
3217 indexfile = getattr(rl, 'indexfile')
3098 data = opener.read(indexfile)
3218 data = opener.read(indexfile)
3099
3219
3100 header = struct.unpack(b'>I', data[0:4])[0]
3220 header = struct.unpack(b'>I', data[0:4])[0]
3101 version = header & 0xFFFF
3221 version = header & 0xFFFF
3102 if version == 1:
3222 if version == 1:
3103 inline = header & (1 << 16)
3223 inline = header & (1 << 16)
3104 else:
3224 else:
3105 raise error.Abort(b'unsupported revlog version: %d' % version)
3225 raise error.Abort(b'unsupported revlog version: %d' % version)
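
    # For orientation, a small sketch (not part of the benchmark) of the v1
    # header layout decoded above: the first four index bytes form a
    # big-endian u32 whose low 16 bits carry the format version and whose
    # bit 16 is the inline-data flag, e.g.:
    #
    #   >>> import struct
    #   >>> hdr = struct.unpack(b'>I', b'\x00\x01\x00\x01')[0]
    #   >>> hdr & 0xFFFF  # version
    #   1
    #   >>> bool(hdr & (1 << 16))  # inline flag
    #   True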

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
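
    Example (an illustrative invocation):

      $ hg perfrevlogrevisions -c --dist 1000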
3241 """
3361 """
3242 opts = _byteskwargs(opts)
3362 opts = _byteskwargs(opts)
3243
3363
3244 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
3364 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
3245 rllen = getlen(ui)(rl)
3365 rllen = getlen(ui)(rl)
3246
3366
3247 if startrev < 0:
3367 if startrev < 0:
3248 startrev = rllen + startrev
3368 startrev = rllen + startrev
3249
3369
3250 def d():
3370 def d():
3251 rl.clearcaches()
3371 rl.clearcaches()
3252
3372
3253 beginrev = startrev
3373 beginrev = startrev
3254 endrev = rllen
3374 endrev = rllen
3255 dist = opts[b'dist']
3375 dist = opts[b'dist']
3256
3376
3257 if reverse:
3377 if reverse:
3258 beginrev, endrev = endrev - 1, beginrev - 1
3378 beginrev, endrev = endrev - 1, beginrev - 1
3259 dist = -1 * dist
3379 dist = -1 * dist
3260
3380
3261 for x in _xrange(beginrev, endrev, dist):
3381 for x in _xrange(beginrev, endrev, dist):
3262 # Old revisions don't support passing int.
3382 # Old revisions don't support passing int.
3263 n = rl.node(x)
3383 n = rl.node(x)
3264 rl.revision(n)
3384 rl.revision(n)
3265
3385
3266 timer, fm = gettimer(ui, opts)
3386 timer, fm = gettimer(ui, opts)
3267 timer(d)
3387 timer(d)
3268 fm.end()
3388 fm.end()
3269
3389
3270
3390
3271 @command(
3391 @command(
3272 b'perf::revlogwrite|perfrevlogwrite',
3392 b'perf::revlogwrite|perfrevlogwrite',
3273 revlogopts
3393 revlogopts
3274 + formatteropts
3394 + formatteropts
3275 + [
3395 + [
3276 (b's', b'startrev', 1000, b'revision to start writing at'),
3396 (b's', b'startrev', 1000, b'revision to start writing at'),
3277 (b'', b'stoprev', -1, b'last revision to write'),
3397 (b'', b'stoprev', -1, b'last revision to write'),
3278 (b'', b'count', 3, b'number of passes to perform'),
3398 (b'', b'count', 3, b'number of passes to perform'),
3279 (b'', b'details', False, b'print timing for every revisions tested'),
3399 (b'', b'details', False, b'print timing for every revisions tested'),
3280 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3400 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3281 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3401 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3282 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3402 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3283 ],
3403 ],
3284 b'-c|-m|FILE',
3404 b'-c|-m|FILE',
3285 )
3405 )
3286 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3406 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3287 """Benchmark writing a series of revisions to a revlog.
3407 """Benchmark writing a series of revisions to a revlog.
3288
3408
3289 Possible source values are:
3409 Possible source values are:
3290 * `full`: add from a full text (default).
3410 * `full`: add from a full text (default).
3291 * `parent-1`: add from a delta to the first parent
3411 * `parent-1`: add from a delta to the first parent
3292 * `parent-2`: add from a delta to the second parent if it exists
3412 * `parent-2`: add from a delta to the second parent if it exists
3293 (use a delta from the first parent otherwise)
3413 (use a delta from the first parent otherwise)
3294 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3414 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3295 * `storage`: add from the existing precomputed deltas
3415 * `storage`: add from the existing precomputed deltas
3296
3416
3297 Note: This performance command measures performance in a custom way. As a
3417 Note: This performance command measures performance in a custom way. As a
3298 result some of the global configuration of the 'perf' command does not
3418 result some of the global configuration of the 'perf' command does not
3299 apply to it:
3419 apply to it:
3300
3420
3301 * ``pre-run``: disabled
3421 * ``pre-run``: disabled
3302
3422
3303 * ``profile-benchmark``: disabled
3423 * ``profile-benchmark``: disabled
3304
3424
3305 * ``run-limits``: disabled use --count instead
3425 * ``run-limits``: disabled use --count instead
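
    Example (an illustrative invocation):

      $ hg perfrevlogwrite -m --source parent-smallest --count 2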
3306 """
3426 """
3307 opts = _byteskwargs(opts)
3427 opts = _byteskwargs(opts)
3308
3428
3309 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3429 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3310 rllen = getlen(ui)(rl)
3430 rllen = getlen(ui)(rl)
3311 if startrev < 0:
3431 if startrev < 0:
3312 startrev = rllen + startrev
3432 startrev = rllen + startrev
3313 if stoprev < 0:
3433 if stoprev < 0:
3314 stoprev = rllen + stoprev
3434 stoprev = rllen + stoprev
3315
3435
3316 lazydeltabase = opts['lazydeltabase']
3436 lazydeltabase = opts['lazydeltabase']
3317 source = opts['source']
3437 source = opts['source']
3318 clearcaches = opts['clear_caches']
3438 clearcaches = opts['clear_caches']
3319 validsource = (
3439 validsource = (
3320 b'full',
3440 b'full',
3321 b'parent-1',
3441 b'parent-1',
3322 b'parent-2',
3442 b'parent-2',
3323 b'parent-smallest',
3443 b'parent-smallest',
3324 b'storage',
3444 b'storage',
3325 )
3445 )
3326 if source not in validsource:
3446 if source not in validsource:
3327 raise error.Abort('invalid source type: %s' % source)
3447 raise error.Abort('invalid source type: %s' % source)
3328
3448
3329 ### actually gather results
3449 ### actually gather results
3330 count = opts['count']
3450 count = opts['count']
3331 if count <= 0:
3451 if count <= 0:
3332 raise error.Abort('invalide run count: %d' % count)
3452 raise error.Abort('invalide run count: %d' % count)
3333 allresults = []
3453 allresults = []
3334 for c in range(count):
3454 for c in range(count):
3335 timing = _timeonewrite(
3455 timing = _timeonewrite(
3336 ui,
3456 ui,
3337 rl,
3457 rl,
3338 source,
3458 source,
3339 startrev,
3459 startrev,
3340 stoprev,
3460 stoprev,
3341 c + 1,
3461 c + 1,
3342 lazydeltabase=lazydeltabase,
3462 lazydeltabase=lazydeltabase,
3343 clearcaches=clearcaches,
3463 clearcaches=clearcaches,
3344 )
3464 )
3345 allresults.append(timing)
3465 allresults.append(timing)
3346
3466
3347 ### consolidate the results in a single list
3467 ### consolidate the results in a single list
3348 results = []
3468 results = []
3349 for idx, (rev, t) in enumerate(allresults[0]):
3469 for idx, (rev, t) in enumerate(allresults[0]):
3350 ts = [t]
3470 ts = [t]
3351 for other in allresults[1:]:
3471 for other in allresults[1:]:
3352 orev, ot = other[idx]
3472 orev, ot = other[idx]
3353 assert orev == rev
3473 assert orev == rev
3354 ts.append(ot)
3474 ts.append(ot)
3355 results.append((rev, ts))
3475 results.append((rev, ts))
3356 resultcount = len(results)
3476 resultcount = len(results)
3357
3477
3358 ### Compute and display relevant statistics
3478 ### Compute and display relevant statistics
3359
3479
3360 # get a formatter
3480 # get a formatter
3361 fm = ui.formatter(b'perf', opts)
3481 fm = ui.formatter(b'perf', opts)
3362 displayall = ui.configbool(b"perf", b"all-timing", True)
3482 displayall = ui.configbool(b"perf", b"all-timing", True)
3363
3483
3364 # print individual details if requested
3484 # print individual details if requested
3365 if opts['details']:
3485 if opts['details']:
3366 for idx, item in enumerate(results, 1):
3486 for idx, item in enumerate(results, 1):
3367 rev, data = item
3487 rev, data = item
3368 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3488 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3369 formatone(fm, data, title=title, displayall=displayall)
3489 formatone(fm, data, title=title, displayall=displayall)
3370
3490
3371 # sorts results by median time
3491 # sorts results by median time
3372 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3492 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3373 # list of (name, index) to display)
3493 # list of (name, index) to display)
3374 relevants = [
3494 relevants = [
3375 ("min", 0),
3495 ("min", 0),
3376 ("10%", resultcount * 10 // 100),
3496 ("10%", resultcount * 10 // 100),
3377 ("25%", resultcount * 25 // 100),
3497 ("25%", resultcount * 25 // 100),
3378 ("50%", resultcount * 70 // 100),
3498 ("50%", resultcount * 70 // 100),
3379 ("75%", resultcount * 75 // 100),
3499 ("75%", resultcount * 75 // 100),
3380 ("90%", resultcount * 90 // 100),
3500 ("90%", resultcount * 90 // 100),
3381 ("95%", resultcount * 95 // 100),
3501 ("95%", resultcount * 95 // 100),
3382 ("99%", resultcount * 99 // 100),
3502 ("99%", resultcount * 99 // 100),
3383 ("99.9%", resultcount * 999 // 1000),
3503 ("99.9%", resultcount * 999 // 1000),
3384 ("99.99%", resultcount * 9999 // 10000),
3504 ("99.99%", resultcount * 9999 // 10000),
3385 ("99.999%", resultcount * 99999 // 100000),
3505 ("99.999%", resultcount * 99999 // 100000),
3386 ("max", -1),
3506 ("max", -1),
3387 ]
3507 ]
3388 if not ui.quiet:
3508 if not ui.quiet:
3389 for name, idx in relevants:
3509 for name, idx in relevants:
3390 data = results[idx]
3510 data = results[idx]
3391 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3511 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3392 formatone(fm, data[1], title=title, displayall=displayall)
3512 formatone(fm, data[1], title=title, displayall=displayall)
3393
3513
3394 # XXX summing that many float will not be very precise, we ignore this fact
3514 # XXX summing that many float will not be very precise, we ignore this fact
3395 # for now
3515 # for now
3396 totaltime = []
3516 totaltime = []
3397 for item in allresults:
3517 for item in allresults:
3398 totaltime.append(
3518 totaltime.append(
3399 (
3519 (
3400 sum(x[1][0] for x in item),
3520 sum(x[1][0] for x in item),
3401 sum(x[1][1] for x in item),
3521 sum(x[1][1] for x in item),
3402 sum(x[1][2] for x in item),
3522 sum(x[1][2] for x in item),
3403 )
3523 )
3404 )
3524 )
3405 formatone(
3525 formatone(
3406 fm,
3526 fm,
3407 totaltime,
3527 totaltime,
3408 title="total time (%d revs)" % resultcount,
3528 title="total time (%d revs)" % resultcount,
3409 displayall=displayall,
3529 displayall=displayall,
3410 )
3530 )
3411 fm.end()
3531 fm.end()
3412
3532
3413
3533
class _faketr:
    def add(s, x, y, z=None):
        return None


def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings


def _getrevisionseed(orig, rev, tr, source):
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )


@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('instantiating revlog from the truncated copy\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)


@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression
    performance. For measurements of higher-level operations like resolving
    revisions, see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
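
    Example (an illustrative invocation; the available engine names depend
    on how this Mercurial was built):

      $ hg perfrevlogchunks -c --engines 'zlib,zstd'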
3595 """
3715 """
3596 opts = _byteskwargs(opts)
3716 opts = _byteskwargs(opts)
3597
3717
3598 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3718 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3599
3719
3600 # _chunkraw was renamed to _getsegmentforrevs.
3720 # _chunkraw was renamed to _getsegmentforrevs.
3601 try:
3721 try:
3602 segmentforrevs = rl._getsegmentforrevs
3722 segmentforrevs = rl._getsegmentforrevs
3603 except AttributeError:
3723 except AttributeError:
3604 segmentforrevs = rl._chunkraw
3724 segmentforrevs = rl._chunkraw
3605
3725
3606 # Verify engines argument.
3726 # Verify engines argument.
3607 if engines:
3727 if engines:
3608 engines = {e.strip() for e in engines.split(b',')}
3728 engines = {e.strip() for e in engines.split(b',')}
3609 for engine in engines:
3729 for engine in engines:
3610 try:
3730 try:
3611 util.compressionengines[engine]
3731 util.compressionengines[engine]
3612 except KeyError:
3732 except KeyError:
3613 raise error.Abort(b'unknown compression engine: %s' % engine)
3733 raise error.Abort(b'unknown compression engine: %s' % engine)
3614 else:
3734 else:
3615 engines = []
3735 engines = []
3616 for e in util.compengines:
3736 for e in util.compengines:
3617 engine = util.compengines[e]
3737 engine = util.compengines[e]
3618 try:
3738 try:
3619 if engine.available():
3739 if engine.available():
3620 engine.revlogcompressor().compress(b'dummy')
3740 engine.revlogcompressor().compress(b'dummy')
3621 engines.append(e)
3741 engines.append(e)
3622 except NotImplementedError:
3742 except NotImplementedError:
3623 pass
3743 pass
3624
3744
3625 revs = list(rl.revs(startrev, len(rl) - 1))
3745 revs = list(rl.revs(startrev, len(rl) - 1))
3626
3746
3627 def rlfh(rl):
3747 def rlfh(rl):
3628 if rl._inline:
3748 if rl._inline:
3629 indexfile = getattr(rl, '_indexfile', None)
3749 indexfile = getattr(rl, '_indexfile', None)
3630 if indexfile is None:
3750 if indexfile is None:
3631 # compatibility with <= hg-5.8
3751 # compatibility with <= hg-5.8
3632 indexfile = getattr(rl, 'indexfile')
3752 indexfile = getattr(rl, 'indexfile')
3633 return getsvfs(repo)(indexfile)
3753 return getsvfs(repo)(indexfile)
3634 else:
3754 else:
3635 datafile = getattr(rl, 'datafile', getattr(rl, 'datafile'))
3755 datafile = getattr(rl, 'datafile', getattr(rl, 'datafile'))
3636 return getsvfs(repo)(datafile)
3756 return getsvfs(repo)(datafile)
3637
3757
3638 def doread():
3758 def doread():
3639 rl.clearcaches()
3759 rl.clearcaches()
3640 for rev in revs:
3760 for rev in revs:
3641 segmentforrevs(rev, rev)
3761 segmentforrevs(rev, rev)
3642
3762
3643 def doreadcachedfh():
3763 def doreadcachedfh():
3644 rl.clearcaches()
3764 rl.clearcaches()
3645 fh = rlfh(rl)
3765 fh = rlfh(rl)
3646 for rev in revs:
3766 for rev in revs:
3647 segmentforrevs(rev, rev, df=fh)
3767 segmentforrevs(rev, rev, df=fh)
3648
3768
3649 def doreadbatch():
3769 def doreadbatch():
3650 rl.clearcaches()
3770 rl.clearcaches()
3651 segmentforrevs(revs[0], revs[-1])
3771 segmentforrevs(revs[0], revs[-1])
3652
3772
3653 def doreadbatchcachedfh():
3773 def doreadbatchcachedfh():
3654 rl.clearcaches()
3774 rl.clearcaches()
3655 fh = rlfh(rl)
3775 fh = rlfh(rl)
3656 segmentforrevs(revs[0], revs[-1], df=fh)
3776 segmentforrevs(revs[0], revs[-1], df=fh)
3657
3777
3658 def dochunk():
3778 def dochunk():
3659 rl.clearcaches()
3779 rl.clearcaches()
3660 fh = rlfh(rl)
3780 fh = rlfh(rl)
3661 for rev in revs:
3781 for rev in revs:
3662 rl._chunk(rev, df=fh)
3782 rl._chunk(rev, df=fh)
3663
3783
3664 chunks = [None]
3784 chunks = [None]
3665
3785
3666 def dochunkbatch():
3786 def dochunkbatch():
3667 rl.clearcaches()
3787 rl.clearcaches()
3668 fh = rlfh(rl)
3788 fh = rlfh(rl)
3669 # Save chunks as a side-effect.
3789 # Save chunks as a side-effect.
3670 chunks[0] = rl._chunks(revs, df=fh)
3790 chunks[0] = rl._chunks(revs, df=fh)
3671
3791
3672 def docompress(compressor):
3792 def docompress(compressor):
3673 rl.clearcaches()
3793 rl.clearcaches()
3674
3794
3675 try:
3795 try:
3676 # Swap in the requested compression engine.
3796 # Swap in the requested compression engine.
3677 oldcompressor = rl._compressor
3797 oldcompressor = rl._compressor
3678 rl._compressor = compressor
3798 rl._compressor = compressor
3679 for chunk in chunks[0]:
3799 for chunk in chunks[0]:
3680 rl.compress(chunk)
3800 rl.compress(chunk)
3681 finally:
3801 finally:
3682 rl._compressor = oldcompressor
3802 rl._compressor = oldcompressor
3683
3803
3684 benches = [
3804 benches = [
3685 (lambda: doread(), b'read'),
3805 (lambda: doread(), b'read'),
3686 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3806 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3687 (lambda: doreadbatch(), b'read batch'),
3807 (lambda: doreadbatch(), b'read batch'),
3688 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3808 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3689 (lambda: dochunk(), b'chunk'),
3809 (lambda: dochunk(), b'chunk'),
3690 (lambda: dochunkbatch(), b'chunk batch'),
3810 (lambda: dochunkbatch(), b'chunk batch'),
3691 ]
3811 ]
3692
3812
3693 for engine in sorted(engines):
3813 for engine in sorted(engines):
3694 compressor = util.compengines[engine].revlogcompressor()
3814 compressor = util.compengines[engine].revlogcompressor()
3695 benches.append(
3815 benches.append(
3696 (
3816 (
3697 functools.partial(docompress, compressor),
3817 functools.partial(docompress, compressor),
3698 b'compress w/ %s' % engine,
3818 b'compress w/ %s' % engine,
3699 )
3819 )
3700 )
3820 )
3701
3821
3702 for fn, title in benches:
3822 for fn, title in benches:
3703 timer, fm = gettimer(ui, opts)
3823 timer, fm = gettimer(ui, opts)
3704 timer(fn, title=title)
3824 timer(fn, title=title)
3705 fm.end()
3825 fm.end()
3706
3826
3707
3827
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
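
    Example (an illustrative invocation; the revision number is a
    placeholder):

      $ hg perfrevlogrevision -m 1000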
3728 """
3848 """
3729 opts = _byteskwargs(opts)
3849 opts = _byteskwargs(opts)
3730
3850
3731 if opts.get(b'changelog') or opts.get(b'manifest'):
3851 if opts.get(b'changelog') or opts.get(b'manifest'):
3732 file_, rev = None, file_
3852 file_, rev = None, file_
3733 elif rev is None:
3853 elif rev is None:
3734 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3854 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3735
3855
3736 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3856 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3737
3857
3738 # _chunkraw was renamed to _getsegmentforrevs.
3858 # _chunkraw was renamed to _getsegmentforrevs.
3739 try:
3859 try:
3740 segmentforrevs = r._getsegmentforrevs
3860 segmentforrevs = r._getsegmentforrevs
3741 except AttributeError:
3861 except AttributeError:
3742 segmentforrevs = r._chunkraw
3862 segmentforrevs = r._chunkraw
3743
3863
3744 node = r.lookup(rev)
3864 node = r.lookup(rev)
3745 rev = r.rev(node)
3865 rev = r.rev(node)
3746
3866
3747 def getrawchunks(data, chain):
3867 def getrawchunks(data, chain):
3748 start = r.start
3868 start = r.start
3749 length = r.length
3869 length = r.length
3750 inline = r._inline
3870 inline = r._inline
3751 try:
3871 try:
3752 iosize = r.index.entry_size
3872 iosize = r.index.entry_size
3753 except AttributeError:
3873 except AttributeError:
3754 iosize = r._io.size
3874 iosize = r._io.size
3755 buffer = util.buffer
3875 buffer = util.buffer
3756
3876
3757 chunks = []
3877 chunks = []
3758 ladd = chunks.append
3878 ladd = chunks.append
3759 for idx, item in enumerate(chain):
3879 for idx, item in enumerate(chain):
3760 offset = start(item[0])
3880 offset = start(item[0])
3761 bits = data[idx]
3881 bits = data[idx]
3762 for rev in item:
3882 for rev in item:
3763 chunkstart = start(rev)
3883 chunkstart = start(rev)
3764 if inline:
3884 if inline:
3765 chunkstart += (rev + 1) * iosize
3885 chunkstart += (rev + 1) * iosize
3766 chunklength = length(rev)
3886 chunklength = length(rev)
3767 ladd(buffer(bits, chunkstart - offset, chunklength))
3887 ladd(buffer(bits, chunkstart - offset, chunklength))
3768
3888
3769 return chunks
3889 return chunks
3770
3890
3771 def dodeltachain(rev):
3891 def dodeltachain(rev):
3772 if not cache:
3892 if not cache:
3773 r.clearcaches()
3893 r.clearcaches()
3774 r._deltachain(rev)
3894 r._deltachain(rev)
3775
3895
3776 def doread(chain):
3896 def doread(chain):
3777 if not cache:
3897 if not cache:
3778 r.clearcaches()
3898 r.clearcaches()
3779 for item in slicedchain:
3899 for item in slicedchain:
3780 segmentforrevs(item[0], item[-1])
3900 segmentforrevs(item[0], item[-1])
3781
3901
3782 def doslice(r, chain, size):
3902 def doslice(r, chain, size):
3783 for s in slicechunk(r, chain, targetsize=size):
3903 for s in slicechunk(r, chain, targetsize=size):
3784 pass
3904 pass
3785
3905
3786 def dorawchunks(data, chain):
3906 def dorawchunks(data, chain):
3787 if not cache:
3907 if not cache:
3788 r.clearcaches()
3908 r.clearcaches()
3789 getrawchunks(data, chain)
3909 getrawchunks(data, chain)
3790
3910
3791 def dodecompress(chunks):
3911 def dodecompress(chunks):
3792 decomp = r.decompress
3912 decomp = r.decompress
3793 for chunk in chunks:
3913 for chunk in chunks:
3794 decomp(chunk)
3914 decomp(chunk)
3795
3915
3796 def dopatch(text, bins):
3916 def dopatch(text, bins):
3797 if not cache:
3917 if not cache:
3798 r.clearcaches()
3918 r.clearcaches()
3799 mdiff.patches(text, bins)
3919 mdiff.patches(text, bins)
3800
3920
3801 def dohash(text):
3921 def dohash(text):
3802 if not cache:
3922 if not cache:
3803 r.clearcaches()
3923 r.clearcaches()
3804 r.checkhash(text, node, rev=rev)
3924 r.checkhash(text, node, rev=rev)
3805
3925
3806 def dorevision():
3926 def dorevision():
3807 if not cache:
3927 if not cache:
3808 r.clearcaches()
3928 r.clearcaches()
3809 r.revision(node)
3929 r.revision(node)
3810
3930
3811 try:
3931 try:
3812 from mercurial.revlogutils.deltas import slicechunk
3932 from mercurial.revlogutils.deltas import slicechunk
3813 except ImportError:
3933 except ImportError:
3814 slicechunk = getattr(revlog, '_slicechunk', None)
3934 slicechunk = getattr(revlog, '_slicechunk', None)
3815
3935
3816 size = r.length(rev)
3936 size = r.length(rev)
3817 chain = r._deltachain(rev)[0]
3937 chain = r._deltachain(rev)[0]
3818 if not getattr(r, '_withsparseread', False):
3938 if not getattr(r, '_withsparseread', False):
3819 slicedchain = (chain,)
3939 slicedchain = (chain,)
3820 else:
3940 else:
3821 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3941 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3822 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3942 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3823 rawchunks = getrawchunks(data, slicedchain)
3943 rawchunks = getrawchunks(data, slicedchain)
3824 bins = r._chunks(chain)
3944 bins = r._chunks(chain)
3825 text = bytes(bins[0])
3945 text = bytes(bins[0])
3826 bins = bins[1:]
3946 bins = bins[1:]
3827 text = mdiff.patches(text, bins)
3947 text = mdiff.patches(text, bins)
3828
3948
3829 benches = [
3949 benches = [
3830 (lambda: dorevision(), b'full'),
3950 (lambda: dorevision(), b'full'),
3831 (lambda: dodeltachain(rev), b'deltachain'),
3951 (lambda: dodeltachain(rev), b'deltachain'),
3832 (lambda: doread(chain), b'read'),
3952 (lambda: doread(chain), b'read'),
3833 ]
3953 ]
3834
3954
3835 if getattr(r, '_withsparseread', False):
3955 if getattr(r, '_withsparseread', False):
3836 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3956 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3837 benches.append(slicing)
3957 benches.append(slicing)
3838
3958
3839 benches.extend(
3959 benches.extend(
3840 [
3960 [
3841 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3961 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3842 (lambda: dodecompress(rawchunks), b'decompress'),
3962 (lambda: dodecompress(rawchunks), b'decompress'),
3843 (lambda: dopatch(text, bins), b'patch'),
3963 (lambda: dopatch(text, bins), b'patch'),
3844 (lambda: dohash(text), b'hash'),
3964 (lambda: dohash(text), b'hash'),
3845 ]
3965 ]
3846 )
3966 )
3847
3967
3848 timer, fm = gettimer(ui, opts)
3968 timer, fm = gettimer(ui, opts)
3849 for fn, title in benches:
3969 for fn, title in benches:
3850 timer(fn, title=title)
3970 timer(fn, title=title)
3851 fm.end()
3971 fm.end()
3852
3972
3853
3973
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option to evaluate the impact of building the volatile
    revision set caches (which hold filtering- and obsolescence-related
    data) on revset execution.
    """
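    # Example (an illustrative invocation; any revset expression works):
    #
    #   $ hg perfrevset 'draft() and head()' --contexts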
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()


@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets compute elements related to filtering and obsolescence.
    """
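    # Example (an illustrative invocation, restricting the benchmark to the
    # 'obsolete' set):
    #
    #   $ hg perfvolatilesets obsolete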
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()


@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write
    disabled.
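
    Example (an illustrative invocation, benchmarking only the 'visible'
    filter):

      $ hg perfbranchmap visible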
3953 """
4073 """
3954 opts = _byteskwargs(opts)
4074 opts = _byteskwargs(opts)
3955 full = opts.get(b"full", False)
4075 full = opts.get(b"full", False)
3956 clear_revbranch = opts.get(b"clear_revbranch", False)
4076 clear_revbranch = opts.get(b"clear_revbranch", False)
3957 timer, fm = gettimer(ui, opts)
4077 timer, fm = gettimer(ui, opts)
3958
4078
3959 def getbranchmap(filtername):
4079 def getbranchmap(filtername):
3960 """generate a benchmark function for the filtername"""
4080 """generate a benchmark function for the filtername"""
3961 if filtername is None:
4081 if filtername is None:
3962 view = repo
4082 view = repo
3963 else:
4083 else:
3964 view = repo.filtered(filtername)
4084 view = repo.filtered(filtername)
3965 if util.safehasattr(view._branchcaches, '_per_filter'):
4085 if util.safehasattr(view._branchcaches, '_per_filter'):
3966 filtered = view._branchcaches._per_filter
4086 filtered = view._branchcaches._per_filter
3967 else:
4087 else:
3968 # older versions
4088 # older versions
3969 filtered = view._branchcaches
4089 filtered = view._branchcaches
3970
4090
3971 def d():
4091 def d():
3972 if clear_revbranch:
4092 if clear_revbranch:
3973 repo.revbranchcache()._clear()
4093 repo.revbranchcache()._clear()
3974 if full:
4094 if full:
3975 view._branchcaches.clear()
4095 view._branchcaches.clear()
3976 else:
4096 else:
3977 filtered.pop(filtername, None)
4097 filtered.pop(filtername, None)
3978 view.branchmap()
4098 view.branchmap()
3979
4099
3980 return d
4100 return d
3981
4101
3982 # add filter in smaller subset to bigger subset
4102 # add filter in smaller subset to bigger subset
3983 possiblefilters = set(repoview.filtertable)
4103 possiblefilters = set(repoview.filtertable)
3984 if filternames:
4104 if filternames:
3985 possiblefilters &= set(filternames)
4105 possiblefilters &= set(filternames)
3986 subsettable = getbranchmapsubsettable()
4106 subsettable = getbranchmapsubsettable()
3987 allfilters = []
4107 allfilters = []
3988 while possiblefilters:
4108 while possiblefilters:
3989 for name in possiblefilters:
4109 for name in possiblefilters:
3990 subset = subsettable.get(name)
4110 subset = subsettable.get(name)
3991 if subset not in possiblefilters:
4111 if subset not in possiblefilters:
3992 break
4112 break
3993 else:
4113 else:
3994 assert False, b'subset cycle %s!' % possiblefilters
4114 assert False, b'subset cycle %s!' % possiblefilters
3995 allfilters.append(name)
4115 allfilters.append(name)
3996 possiblefilters.remove(name)
4116 possiblefilters.remove(name)
3997
4117
3998 # warm the cache
4118 # warm the cache
3999 if not full:
4119 if not full:
4000 for name in allfilters:
4120 for name in allfilters:
4001 repo.filtered(name).branchmap()
4121 repo.filtered(name).branchmap()
4002 if not filternames or b'unfiltered' in filternames:
4122 if not filternames or b'unfiltered' in filternames:
4003 # add unfiltered
4123 # add unfiltered
4004 allfilters.append(None)
4124 allfilters.append(None)
4005
4125
4006 if util.safehasattr(branchmap.branchcache, 'fromfile'):
4126 if util.safehasattr(branchmap.branchcache, 'fromfile'):
4007 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4127 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4008 branchcacheread.set(classmethod(lambda *args: None))
4128 branchcacheread.set(classmethod(lambda *args: None))
4009 else:
4129 else:
4010 # older versions
4130 # older versions
4011 branchcacheread = safeattrsetter(branchmap, b'read')
4131 branchcacheread = safeattrsetter(branchmap, b'read')
4012 branchcacheread.set(lambda *args: None)
4132 branchcacheread.set(lambda *args: None)
4013 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4133 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4014 branchcachewrite.set(lambda *args: None)
4134 branchcachewrite.set(lambda *args: None)
4015 try:
4135 try:
4016 for name in allfilters:
4136 for name in allfilters:
4017 printname = name
4137 printname = name
4018 if name is None:
4138 if name is None:
4019 printname = b'unfiltered'
4139 printname = b'unfiltered'
4020 timer(getbranchmap(name), title=printname)
4140 timer(getbranchmap(name), title=printname)
4021 finally:
4141 finally:
4022 branchcacheread.restore()
4142 branchcacheread.restore()
4023 branchcachewrite.restore()
4143 branchcachewrite.restore()
4024 fm.end()
4144 fm.end()
4025
4145
4026
4146
4027 @command(
4147 @command(
4028 b'perf::branchmapupdate|perfbranchmapupdate',
4148 b'perf::branchmapupdate|perfbranchmapupdate',
4029 [
4149 [
4030 (b'', b'base', [], b'subset of revisions to start from'),
4150 (b'', b'base', [], b'subset of revisions to start from'),
4031 (b'', b'target', [], b'subset of revisions to end with'),
4151 (b'', b'target', [], b'subset of revisions to end with'),
4032 (b'', b'clear-caches', False, b'clear cache between each run'),
4152 (b'', b'clear-caches', False, b'clear cache between each run'),
4033 ]
4153 ]
4034 + formatteropts,
4154 + formatteropts,
4035 )
4155 )
4036 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4156 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4037 """benchmark branchmap update from for <base> revs to <target> revs
4157 """benchmark branchmap update from for <base> revs to <target> revs
4038
4158
4039 If `--clear-caches` is passed, the following items will be reset before
4159 If `--clear-caches` is passed, the following items will be reset before
4040 each update:
4160 each update:
4041 * the changelog instance and associated indexes
4161 * the changelog instance and associated indexes
4042 * the rev-branch-cache instance
4162 * the rev-branch-cache instance
4043
4163
4044 Examples:
4164 Examples:
4045
4165
4046 # update for the one last revision
4166 # update for the one last revision
4047 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4167 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4048
4168
4049 # update for a change coming with a new branch
4169 # update for a change coming with a new branch
4050 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4170 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4051 """
4171 """
4052 from mercurial import branchmap
4172 from mercurial import branchmap
4053 from mercurial import repoview
4173 from mercurial import repoview
4054
4174
4055 opts = _byteskwargs(opts)
4175 opts = _byteskwargs(opts)
4056 timer, fm = gettimer(ui, opts)
4176 timer, fm = gettimer(ui, opts)
4057 clearcaches = opts[b'clear_caches']
4177 clearcaches = opts[b'clear_caches']
4058 unfi = repo.unfiltered()
4178 unfi = repo.unfiltered()
4059 x = [None] # used to pass data between closures
4179 x = [None] # used to pass data between closures
4060
4180
4061 # we use a `list` here to avoid possible side effect from smartset
4181 # we use a `list` here to avoid possible side effect from smartset
4062 baserevs = list(scmutil.revrange(repo, base))
4182 baserevs = list(scmutil.revrange(repo, base))
4063 targetrevs = list(scmutil.revrange(repo, target))
4183 targetrevs = list(scmutil.revrange(repo, target))
4064 if not baserevs:
4184 if not baserevs:
4065 raise error.Abort(b'no revisions selected for --base')
4185 raise error.Abort(b'no revisions selected for --base')
4066 if not targetrevs:
4186 if not targetrevs:
4067 raise error.Abort(b'no revisions selected for --target')
4187 raise error.Abort(b'no revisions selected for --target')
4068
4188
4069 # make sure the target branchmap also contains the one in the base
4189 # make sure the target branchmap also contains the one in the base
4070 targetrevs = list(set(baserevs) | set(targetrevs))
4190 targetrevs = list(set(baserevs) | set(targetrevs))
4071 targetrevs.sort()
4191 targetrevs.sort()
4072
4192
4073 cl = repo.changelog
4193 cl = repo.changelog
4074 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4194 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4075 allbaserevs.sort()
4195 allbaserevs.sort()
4076 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4196 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4077
4197
4078 newrevs = list(alltargetrevs.difference(allbaserevs))
4198 newrevs = list(alltargetrevs.difference(allbaserevs))
4079 newrevs.sort()
4199 newrevs.sort()
4080
4200
4081 allrevs = frozenset(unfi.changelog.revs())
4201 allrevs = frozenset(unfi.changelog.revs())
4082 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4202 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4083 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4203 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4084
4204
4085 def basefilter(repo, visibilityexceptions=None):
4205 def basefilter(repo, visibilityexceptions=None):
4086 return basefilterrevs
4206 return basefilterrevs
4087
4207
4088 def targetfilter(repo, visibilityexceptions=None):
4208 def targetfilter(repo, visibilityexceptions=None):
4089 return targetfilterrevs
4209 return targetfilterrevs
4090
4210
4091 msg = b'benchmark of branchmap with %d revisions and %d new ones\n'
4211 msg = b'benchmark of branchmap with %d revisions and %d new ones\n'
4092 ui.status(msg % (len(allbaserevs), len(newrevs)))
4212 ui.status(msg % (len(allbaserevs), len(newrevs)))
4093 if targetfilterrevs:
4213 if targetfilterrevs:
4094 msg = b'(%d revisions still filtered)\n'
4214 msg = b'(%d revisions still filtered)\n'
4095 ui.status(msg % len(targetfilterrevs))
4215 ui.status(msg % len(targetfilterrevs))
4096
4216
4097 try:
4217 try:
4098 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4218 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4099 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4219 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4100
4220
4101 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4221 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4102 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4222 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4103
4223
4104 # try to find an existing branchmap to reuse
4224 # try to find an existing branchmap to reuse
4105 subsettable = getbranchmapsubsettable()
4225 subsettable = getbranchmapsubsettable()
4106 candidatefilter = subsettable.get(None)
4226 candidatefilter = subsettable.get(None)
4107 while candidatefilter is not None:
4227 while candidatefilter is not None:
4108 candidatebm = repo.filtered(candidatefilter).branchmap()
4228 candidatebm = repo.filtered(candidatefilter).branchmap()
4109 if candidatebm.validfor(baserepo):
4229 if candidatebm.validfor(baserepo):
4110 filtered = repoview.filterrevs(repo, candidatefilter)
4230 filtered = repoview.filterrevs(repo, candidatefilter)
4111 missing = [r for r in allbaserevs if r in filtered]
4231 missing = [r for r in allbaserevs if r in filtered]
4112 base = candidatebm.copy()
4232 base = candidatebm.copy()
4113 base.update(baserepo, missing)
4233 base.update(baserepo, missing)
4114 break
4234 break
4115 candidatefilter = subsettable.get(candidatefilter)
4235 candidatefilter = subsettable.get(candidatefilter)
4116 else:
4236 else:
4117 # no suitable subset was found
4237 # no suitable subset was found
4118 base = branchmap.branchcache()
4238 base = branchmap.branchcache()
4119 base.update(baserepo, allbaserevs)
4239 base.update(baserepo, allbaserevs)
4120
4240
4121 def setup():
4241 def setup():
4122 x[0] = base.copy()
4242 x[0] = base.copy()
4123 if clearcaches:
4243 if clearcaches:
4124 unfi._revbranchcache = None
4244 unfi._revbranchcache = None
4125 clearchangelog(repo)
4245 clearchangelog(repo)
4126
4246
4127 def bench():
4247 def bench():
4128 x[0].update(targetrepo, newrevs)
4248 x[0].update(targetrepo, newrevs)
4129
4249
4130 timer(bench, setup=setup)
4250 timer(bench, setup=setup)
4131 fm.end()
4251 fm.end()
4132 finally:
4252 finally:
4133 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4253 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4134 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4254 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4135
4255
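The reuse loop in perfbranchmapupdate above walks the repoview subset chain: it follows the subsettable mapping from one filter level to the next until it finds a cached branchmap that is valid for the base repository, which then seeds the benchmarked update. A minimal sketch of that walk (illustrative only; the exact chain of filter names is version-dependent):

    subsettable = getbranchmapsubsettable()
    name = subsettable.get(None)              # first candidate filter level
    while name is not None:
        bm = repo.filtered(name).branchmap()
        if bm.validfor(baserepo):             # reusable seed found
            break
        name = subsettable.get(name)          # next level in the chain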
4136
4256
4137 @command(
4257 @command(
4138 b'perf::branchmapload|perfbranchmapload',
4258 b'perf::branchmapload|perfbranchmapload',
4139 [
4259 [
4140 (b'f', b'filter', b'', b'Specify repoview filter'),
4260 (b'f', b'filter', b'', b'Specify repoview filter'),
4141 (b'', b'list', False, b'List branchmap filter caches'),
4261 (b'', b'list', False, b'List branchmap filter caches'),
4142 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4262 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4143 ]
4263 ]
4144 + formatteropts,
4264 + formatteropts,
4145 )
4265 )
4146 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4266 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4147 """benchmark reading the branchmap"""
4267 """benchmark reading the branchmap"""
4148 opts = _byteskwargs(opts)
4268 opts = _byteskwargs(opts)
4149 clearrevlogs = opts[b'clear_revlogs']
4269 clearrevlogs = opts[b'clear_revlogs']
4150
4270
4151 if list:
4271 if list:
4152 for name, kind, st in repo.cachevfs.readdir(stat=True):
4272 for name, kind, st in repo.cachevfs.readdir(stat=True):
4153 if name.startswith(b'branch2'):
4273 if name.startswith(b'branch2'):
4154 filtername = name.partition(b'-')[2] or b'unfiltered'
4274 filtername = name.partition(b'-')[2] or b'unfiltered'
4155 ui.status(
4275 ui.status(
4156 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4276 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4157 )
4277 )
4158 return
4278 return
4159 if not filter:
4279 if not filter:
4160 filter = None
4280 filter = None
4161 subsettable = getbranchmapsubsettable()
4281 subsettable = getbranchmapsubsettable()
4162 if filter is None:
4282 if filter is None:
4163 repo = repo.unfiltered()
4283 repo = repo.unfiltered()
4164 else:
4284 else:
4165 repo = repoview.repoview(repo, filter)
4285 repo = repoview.repoview(repo, filter)
4166
4286
4167 repo.branchmap() # make sure we have a relevant, up to date branchmap
4287 repo.branchmap() # make sure we have a relevant, up to date branchmap
4168
4288
4169 try:
4289 try:
4170 fromfile = branchmap.branchcache.fromfile
4290 fromfile = branchmap.branchcache.fromfile
4171 except AttributeError:
4291 except AttributeError:
4172 # older versions
4292 # older versions
4173 fromfile = branchmap.read
4293 fromfile = branchmap.read
4174
4294
4175 currentfilter = filter
4295 currentfilter = filter
4176 # try once without the timer; the filter may not be cached
4296 # try once without the timer; the filter may not be cached
4177 while fromfile(repo) is None:
4297 while fromfile(repo) is None:
4178 currentfilter = subsettable.get(currentfilter)
4298 currentfilter = subsettable.get(currentfilter)
4179 if currentfilter is None:
4299 if currentfilter is None:
4180 raise error.Abort(
4300 raise error.Abort(
4181 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4301 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4182 )
4302 )
4183 repo = repo.filtered(currentfilter)
4303 repo = repo.filtered(currentfilter)
4184 timer, fm = gettimer(ui, opts)
4304 timer, fm = gettimer(ui, opts)
4185
4305
4186 def setup():
4306 def setup():
4187 if clearrevlogs:
4307 if clearrevlogs:
4188 clearchangelog(repo)
4308 clearchangelog(repo)
4189
4309
4190 def bench():
4310 def bench():
4191 fromfile(repo)
4311 fromfile(repo)
4192
4312
4193 timer(bench, setup=setup)
4313 timer(bench, setup=setup)
4194 fm.end()
4314 fm.end()
4195
4315
4196
4316
4197 @command(b'perf::loadmarkers|perfloadmarkers')
4317 @command(b'perf::loadmarkers|perfloadmarkers')
4198 def perfloadmarkers(ui, repo):
4318 def perfloadmarkers(ui, repo):
4199 """benchmark the time to parse the on-disk markers for a repo
4319 """benchmark the time to parse the on-disk markers for a repo
4200
4320
4201 Result is the number of markers in the repo."""
4321 Result is the number of markers in the repo."""
4202 timer, fm = gettimer(ui)
4322 timer, fm = gettimer(ui)
4203 svfs = getsvfs(repo)
4323 svfs = getsvfs(repo)
4204 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4324 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4205 fm.end()
4325 fm.end()
4206
4326
4207
4327
4208 @command(
4328 @command(
4209 b'perf::lrucachedict|perflrucachedict',
4329 b'perf::lrucachedict|perflrucachedict',
4210 formatteropts
4330 formatteropts
4211 + [
4331 + [
4212 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4332 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4213 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4333 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4214 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4334 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4215 (b'', b'size', 4, b'size of cache'),
4335 (b'', b'size', 4, b'size of cache'),
4216 (b'', b'gets', 10000, b'number of key lookups'),
4336 (b'', b'gets', 10000, b'number of key lookups'),
4217 (b'', b'sets', 10000, b'number of key sets'),
4337 (b'', b'sets', 10000, b'number of key sets'),
4218 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4338 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4219 (
4339 (
4220 b'',
4340 b'',
4221 b'mixedgetfreq',
4341 b'mixedgetfreq',
4222 50,
4342 50,
4223 b'frequency of get vs set ops in mixed mode',
4343 b'frequency of get vs set ops in mixed mode',
4224 ),
4344 ),
4225 ],
4345 ],
4226 norepo=True,
4346 norepo=True,
4227 )
4347 )
4228 def perflrucache(
4348 def perflrucache(
4229 ui,
4349 ui,
4230 mincost=0,
4350 mincost=0,
4231 maxcost=100,
4351 maxcost=100,
4232 costlimit=0,
4352 costlimit=0,
4233 size=4,
4353 size=4,
4234 gets=10000,
4354 gets=10000,
4235 sets=10000,
4355 sets=10000,
4236 mixed=10000,
4356 mixed=10000,
4237 mixedgetfreq=50,
4357 mixedgetfreq=50,
4238 **opts
4358 **opts
4239 ):
4359 ):
4240 opts = _byteskwargs(opts)
4360 opts = _byteskwargs(opts)
4241
4361
4242 def doinit():
4362 def doinit():
4243 for i in _xrange(10000):
4363 for i in _xrange(10000):
4244 util.lrucachedict(size)
4364 util.lrucachedict(size)
4245
4365
4246 costrange = list(range(mincost, maxcost + 1))
4366 costrange = list(range(mincost, maxcost + 1))
4247
4367
4248 values = []
4368 values = []
4249 for i in _xrange(size):
4369 for i in _xrange(size):
4250 values.append(random.randint(0, _maxint))
4370 values.append(random.randint(0, _maxint))
4251
4371
4252 # Get mode fills the cache and tests raw lookup performance with no
4372 # Get mode fills the cache and tests raw lookup performance with no
4253 # eviction.
4373 # eviction.
4254 getseq = []
4374 getseq = []
4255 for i in _xrange(gets):
4375 for i in _xrange(gets):
4256 getseq.append(random.choice(values))
4376 getseq.append(random.choice(values))
4257
4377
4258 def dogets():
4378 def dogets():
4259 d = util.lrucachedict(size)
4379 d = util.lrucachedict(size)
4260 for v in values:
4380 for v in values:
4261 d[v] = v
4381 d[v] = v
4262 for key in getseq:
4382 for key in getseq:
4263 value = d[key]
4383 value = d[key]
4264 value # silence pyflakes warning
4384 value # silence pyflakes warning
4265
4385
4266 def dogetscost():
4386 def dogetscost():
4267 d = util.lrucachedict(size, maxcost=costlimit)
4387 d = util.lrucachedict(size, maxcost=costlimit)
4268 for i, v in enumerate(values):
4388 for i, v in enumerate(values):
4269 d.insert(v, v, cost=costs[i])
4389 d.insert(v, v, cost=costs[i])
4270 for key in getseq:
4390 for key in getseq:
4271 try:
4391 try:
4272 value = d[key]
4392 value = d[key]
4273 value # silence pyflakes warning
4393 value # silence pyflakes warning
4274 except KeyError:
4394 except KeyError:
4275 pass
4395 pass
4276
4396
4277 # Set mode tests insertion speed with cache eviction.
4397 # Set mode tests insertion speed with cache eviction.
4278 setseq = []
4398 setseq = []
4279 costs = []
4399 costs = []
4280 for i in _xrange(sets):
4400 for i in _xrange(sets):
4281 setseq.append(random.randint(0, _maxint))
4401 setseq.append(random.randint(0, _maxint))
4282 costs.append(random.choice(costrange))
4402 costs.append(random.choice(costrange))
4283
4403
4284 def doinserts():
4404 def doinserts():
4285 d = util.lrucachedict(size)
4405 d = util.lrucachedict(size)
4286 for v in setseq:
4406 for v in setseq:
4287 d.insert(v, v)
4407 d.insert(v, v)
4288
4408
4289 def doinsertscost():
4409 def doinsertscost():
4290 d = util.lrucachedict(size, maxcost=costlimit)
4410 d = util.lrucachedict(size, maxcost=costlimit)
4291 for i, v in enumerate(setseq):
4411 for i, v in enumerate(setseq):
4292 d.insert(v, v, cost=costs[i])
4412 d.insert(v, v, cost=costs[i])
4293
4413
4294 def dosets():
4414 def dosets():
4295 d = util.lrucachedict(size)
4415 d = util.lrucachedict(size)
4296 for v in setseq:
4416 for v in setseq:
4297 d[v] = v
4417 d[v] = v
4298
4418
4299 # Mixed mode randomly performs gets and sets with eviction.
4419 # Mixed mode randomly performs gets and sets with eviction.
4300 mixedops = []
4420 mixedops = []
4301 for i in _xrange(mixed):
4421 for i in _xrange(mixed):
4302 r = random.randint(0, 100)
4422 r = random.randint(0, 100)
4303 if r < mixedgetfreq:
4423 if r < mixedgetfreq:
4304 op = 0
4424 op = 0
4305 else:
4425 else:
4306 op = 1
4426 op = 1
4307
4427
4308 mixedops.append(
4428 mixedops.append(
4309 (op, random.randint(0, size * 2), random.choice(costrange))
4429 (op, random.randint(0, size * 2), random.choice(costrange))
4310 )
4430 )
4311
4431
4312 def domixed():
4432 def domixed():
4313 d = util.lrucachedict(size)
4433 d = util.lrucachedict(size)
4314
4434
4315 for op, v, cost in mixedops:
4435 for op, v, cost in mixedops:
4316 if op == 0:
4436 if op == 0:
4317 try:
4437 try:
4318 d[v]
4438 d[v]
4319 except KeyError:
4439 except KeyError:
4320 pass
4440 pass
4321 else:
4441 else:
4322 d[v] = v
4442 d[v] = v
4323
4443
4324 def domixedcost():
4444 def domixedcost():
4325 d = util.lrucachedict(size, maxcost=costlimit)
4445 d = util.lrucachedict(size, maxcost=costlimit)
4326
4446
4327 for op, v, cost in mixedops:
4447 for op, v, cost in mixedops:
4328 if op == 0:
4448 if op == 0:
4329 try:
4449 try:
4330 d[v]
4450 d[v]
4331 except KeyError:
4451 except KeyError:
4332 pass
4452 pass
4333 else:
4453 else:
4334 d.insert(v, v, cost=cost)
4454 d.insert(v, v, cost=cost)
4335
4455
4336 benches = [
4456 benches = [
4337 (doinit, b'init'),
4457 (doinit, b'init'),
4338 ]
4458 ]
4339
4459
4340 if costlimit:
4460 if costlimit:
4341 benches.extend(
4461 benches.extend(
4342 [
4462 [
4343 (dogetscost, b'gets w/ cost limit'),
4463 (dogetscost, b'gets w/ cost limit'),
4344 (doinsertscost, b'inserts w/ cost limit'),
4464 (doinsertscost, b'inserts w/ cost limit'),
4345 (domixedcost, b'mixed w/ cost limit'),
4465 (domixedcost, b'mixed w/ cost limit'),
4346 ]
4466 ]
4347 )
4467 )
4348 else:
4468 else:
4349 benches.extend(
4469 benches.extend(
4350 [
4470 [
4351 (dogets, b'gets'),
4471 (dogets, b'gets'),
4352 (doinserts, b'inserts'),
4472 (doinserts, b'inserts'),
4353 (dosets, b'sets'),
4473 (dosets, b'sets'),
4354 (domixed, b'mixed'),
4474 (domixed, b'mixed'),
4355 ]
4475 ]
4356 )
4476 )
4357
4477
4358 for fn, title in benches:
4478 for fn, title in benches:
4359 timer, fm = gettimer(ui, opts)
4479 timer, fm = gettimer(ui, opts)
4360 timer(fn, title=title)
4480 timer(fn, title=title)
4361 fm.end()
4481 fm.end()
4362
4482
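The benchmarks above exercise util.lrucachedict through both its plain and cost-aware insertion paths. A minimal usage sketch of that API as driven by the benchmarks (illustrative values; assumes cost-based eviction as exercised by the cost-limit modes):

    from mercurial import util

    d = util.lrucachedict(4, maxcost=100)  # capacity 4, total cost budget 100
    d.insert(b'a', 1, cost=60)             # insertion with an explicit cost
    d.insert(b'b', 2, cost=60)             # pushes total cost over the budget,
                                           # so the least recently used entry goes
    d[b'c'] = 3                            # plain __setitem__, as in dosets()
    try:
        d[b'a']                            # evicted keys raise KeyError
    except KeyError:
        pass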
4363
4483
4364 @command(
4484 @command(
4365 b'perf::write|perfwrite',
4485 b'perf::write|perfwrite',
4366 formatteropts
4486 formatteropts
4367 + [
4487 + [
4368 (b'', b'write-method', b'write', b'ui write method'),
4488 (b'', b'write-method', b'write', b'ui write method'),
4369 (b'', b'nlines', 100, b'number of lines'),
4489 (b'', b'nlines', 100, b'number of lines'),
4370 (b'', b'nitems', 100, b'number of items (per line)'),
4490 (b'', b'nitems', 100, b'number of items (per line)'),
4371 (b'', b'item', b'x', b'item that is written'),
4491 (b'', b'item', b'x', b'item that is written'),
4372 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4492 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4373 (b'', b'flush-line', None, b'flush after each line'),
4493 (b'', b'flush-line', None, b'flush after each line'),
4374 ],
4494 ],
4375 )
4495 )
4376 def perfwrite(ui, repo, **opts):
4496 def perfwrite(ui, repo, **opts):
4377 """microbenchmark ui.write (and others)"""
4497 """microbenchmark ui.write (and others)"""
4378 opts = _byteskwargs(opts)
4498 opts = _byteskwargs(opts)
4379
4499
4380 write = getattr(ui, _sysstr(opts[b'write_method']))
4500 write = getattr(ui, _sysstr(opts[b'write_method']))
4381 nlines = int(opts[b'nlines'])
4501 nlines = int(opts[b'nlines'])
4382 nitems = int(opts[b'nitems'])
4502 nitems = int(opts[b'nitems'])
4383 item = opts[b'item']
4503 item = opts[b'item']
4384 batch_line = opts.get(b'batch_line')
4504 batch_line = opts.get(b'batch_line')
4385 flush_line = opts.get(b'flush_line')
4505 flush_line = opts.get(b'flush_line')
4386
4506
4387 if batch_line:
4507 if batch_line:
4388 line = item * nitems + b'\n'
4508 line = item * nitems + b'\n'
4389
4509
4390 def benchmark():
4510 def benchmark():
4391 for i in pycompat.xrange(nlines):
4511 for i in pycompat.xrange(nlines):
4392 if batch_line:
4512 if batch_line:
4393 write(line)
4513 write(line)
4394 else:
4514 else:
4395 for i in pycompat.xrange(nitems):
4515 for i in pycompat.xrange(nitems):
4396 write(item)
4516 write(item)
4397 write(b'\n')
4517 write(b'\n')
4398 if flush_line:
4518 if flush_line:
4399 ui.flush()
4519 ui.flush()
4400 ui.flush()
4520 ui.flush()
4401
4521
4402 timer, fm = gettimer(ui, opts)
4522 timer, fm = gettimer(ui, opts)
4403 timer(benchmark)
4523 timer(benchmark)
4404 fm.end()
4524 fm.end()
4405
4525
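A hypothetical invocation of the benchmark above, combining the flags declared in the command table (values are illustrative):

    $ hg perfwrite --nlines 1000 --nitems 50 --batch-line --flush-line

With --batch-line each line is passed to the write method in a single call; without it the item is written nitems times per line, which is the per-item path the benchmark contrasts it against.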
4406
4526
4407 def uisetup(ui):
4527 def uisetup(ui):
4408 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4528 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4409 commands, b'debugrevlogopts'
4529 commands, b'debugrevlogopts'
4410 ):
4530 ):
4411 # for "historical portability":
4531 # for "historical portability":
4412 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4532 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4413 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4533 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4414 # openrevlog() should cause failure, because it has been
4534 # openrevlog() should cause failure, because it has been
4415 # available since 3.5 (or 49c583ca48c4).
4535 # available since 3.5 (or 49c583ca48c4).
4416 def openrevlog(orig, repo, cmd, file_, opts):
4536 def openrevlog(orig, repo, cmd, file_, opts):
4417 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4537 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4418 raise error.Abort(
4538 raise error.Abort(
4419 b"This version doesn't support --dir option",
4539 b"This version doesn't support --dir option",
4420 hint=b"use 3.5 or later",
4540 hint=b"use 3.5 or later",
4421 )
4541 )
4422 return orig(repo, cmd, file_, opts)
4542 return orig(repo, cmd, file_, opts)
4423
4543
4424 name = _sysstr(b'openrevlog')
4544 name = _sysstr(b'openrevlog')
4425 extensions.wrapfunction(cmdutil, name, openrevlog)
4545 extensions.wrapfunction(cmdutil, name, openrevlog)
4426
4546
4427
4547
4428 @command(
4548 @command(
4429 b'perf::progress|perfprogress',
4549 b'perf::progress|perfprogress',
4430 formatteropts
4550 formatteropts
4431 + [
4551 + [
4432 (b'', b'topic', b'topic', b'topic for progress messages'),
4552 (b'', b'topic', b'topic', b'topic for progress messages'),
4433 (b'c', b'total', 1000000, b'total value we are progressing to'),
4553 (b'c', b'total', 1000000, b'total value we are progressing to'),
4434 ],
4554 ],
4435 norepo=True,
4555 norepo=True,
4436 )
4556 )
4437 def perfprogress(ui, topic=None, total=None, **opts):
4557 def perfprogress(ui, topic=None, total=None, **opts):
4438 """printing of progress bars"""
4558 """printing of progress bars"""
4439 opts = _byteskwargs(opts)
4559 opts = _byteskwargs(opts)
4440
4560
4441 timer, fm = gettimer(ui, opts)
4561 timer, fm = gettimer(ui, opts)
4442
4562
4443 def doprogress():
4563 def doprogress():
4444 with ui.makeprogress(topic, total=total) as progress:
4564 with ui.makeprogress(topic, total=total) as progress:
4445 for i in _xrange(total):
4565 for i in _xrange(total):
4446 progress.increment()
4566 progress.increment()
4447
4567
4448 timer(doprogress)
4568 timer(doprogress)
4449 fm.end()
4569 fm.end()
@@ -1,207 +1,211 b''
1 # blackbox.py - log repository events to a file for post-mortem debugging
1 # blackbox.py - log repository events to a file for post-mortem debugging
2 #
2 #
3 # Copyright 2010 Nicolas Dumazet
3 # Copyright 2010 Nicolas Dumazet
4 # Copyright 2013 Facebook, Inc.
4 # Copyright 2013 Facebook, Inc.
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """log repository events to a blackbox for debugging
9 """log repository events to a blackbox for debugging
10
10
11 Logs event information to .hg/blackbox.log to help debug and diagnose problems.
11 Logs event information to .hg/blackbox.log to help debug and diagnose problems.
12 The events that get logged can be configured via the blackbox.track and
12 The events that get logged can be configured via the blackbox.track and
13 blackbox.ignore config keys.
13 blackbox.ignore config keys.
14
14
15 Examples::
15 Examples::
16
16
17 [blackbox]
17 [blackbox]
18 track = *
18 track = *
19 ignore = pythonhook
19 ignore = pythonhook
20 # dirty is *EXPENSIVE* (slow);
20 # dirty is *EXPENSIVE* (slow);
21 # each log entry indicates `+` if the repository is dirty, like :hg:`id`.
21 # each log entry indicates `+` if the repository is dirty, like :hg:`id`.
22 dirty = True
22 dirty = True
23 # record the source of log messages
23 # record the source of log messages
24 logsource = True
24 logsource = True
25
25
26 [blackbox]
26 [blackbox]
27 track = command, commandfinish, commandexception, exthook, pythonhook
27 track = command, commandfinish, commandexception, exthook, pythonhook
28
28
29 [blackbox]
29 [blackbox]
30 track = incoming
30 track = incoming
31
31
32 [blackbox]
32 [blackbox]
33 # limit the size of a log file
33 # limit the size of a log file
34 maxsize = 1.5 MB
34 maxsize = 1.5 MB
35 # rotate up to N log files when the current one gets too big
35 # rotate up to N log files when the current one gets too big
36 maxfiles = 3
36 maxfiles = 3
37
37
38 [blackbox]
38 [blackbox]
39 # Include microseconds in log entries with %f (see Python function
39 # Include microseconds in log entries with %f (see Python function
40 # datetime.datetime.strftime)
40 # datetime.datetime.strftime)
41 date-format = %Y-%m-%d @ %H:%M:%S.%f
41 date-format = %Y-%m-%d @ %H:%M:%S.%f
42
42
43 """
43 """
44
44
45
45
46 import re
46 import re
47
47
48 from mercurial.i18n import _
48 from mercurial.i18n import _
49 from mercurial.node import hex
49 from mercurial.node import hex
50
50
51 from mercurial import (
51 from mercurial import (
52 encoding,
52 encoding,
53 loggingutil,
53 loggingutil,
54 registrar,
54 registrar,
55 )
55 )
56 from mercurial.utils import (
56 from mercurial.utils import (
57 dateutil,
57 dateutil,
58 procutil,
58 procutil,
59 )
59 )
60
60
61 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
61 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
62 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
62 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
63 # be specifying the version(s) of Mercurial they are tested with, or
63 # be specifying the version(s) of Mercurial they are tested with, or
64 # leave the attribute unspecified.
64 # leave the attribute unspecified.
65 testedwith = b'ships-with-hg-core'
65 testedwith = b'ships-with-hg-core'
66
66
67 cmdtable = {}
67 cmdtable = {}
68 command = registrar.command(cmdtable)
68 command = registrar.command(cmdtable)
69
69
70 _lastlogger = loggingutil.proxylogger()
70 _lastlogger = loggingutil.proxylogger()
71
71
72
72
73 class blackboxlogger:
73 class blackboxlogger:
74 def __init__(self, ui, repo):
74 def __init__(self, ui, repo):
75 self._repo = repo
75 self._repo = repo
76 self._trackedevents = set(ui.configlist(b'blackbox', b'track'))
76 self._trackedevents = set(ui.configlist(b'blackbox', b'track'))
77 self._ignoredevents = set(ui.configlist(b'blackbox', b'ignore'))
77 self._ignoredevents = set(ui.configlist(b'blackbox', b'ignore'))
78 self._maxfiles = ui.configint(b'blackbox', b'maxfiles')
78 self._maxfiles = ui.configint(b'blackbox', b'maxfiles')
79 self._maxsize = ui.configbytes(b'blackbox', b'maxsize')
79 self._maxsize = ui.configbytes(b'blackbox', b'maxsize')
80 self._inlog = False
80 self._inlog = False
81
81
82 def tracked(self, event):
82 def tracked(self, event):
83 return (
83 return (
84 b'*' in self._trackedevents and event not in self._ignoredevents
84 b'*' in self._trackedevents and event not in self._ignoredevents
85 ) or event in self._trackedevents
85 ) or event in self._trackedevents
86
86
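# A worked example of the predicate above, using the configuration from the
# module docstring: with track = * and ignore = pythonhook, tracked(b'command')
# is True while tracked(b'pythonhook') is False. An event named explicitly in
# blackbox.track is logged even if it also appears in blackbox.ignore.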
87 def log(self, ui, event, msg, opts):
87 def log(self, ui, event, msg, opts):
88 # self._log() -> ctx.dirty() may create a new subrepo instance, whose
88 # self._log() -> ctx.dirty() may create a new subrepo instance, whose
89 # ui is derived from baseui. So the recursion guard in ui.log()
89 # ui is derived from baseui. So the recursion guard in ui.log()
90 # doesn't work as it's local to the ui instance.
90 # doesn't work as it's local to the ui instance.
91 if self._inlog:
91 if self._inlog:
92 return
92 return
93 self._inlog = True
93 self._inlog = True
94 try:
94 try:
95 self._log(ui, event, msg, opts)
95 self._log(ui, event, msg, opts)
96 finally:
96 finally:
97 self._inlog = False
97 self._inlog = False
98
98
99 def _log(self, ui, event, msg, opts):
99 def _log(self, ui, event, msg, opts):
100 default = ui.configdate(b'devel', b'default-date')
100 default = ui.configdate(b'devel', b'default-date')
101 dateformat = ui.config(b'blackbox', b'date-format')
101 dateformat = ui.config(b'blackbox', b'date-format')
102 debug_to_stderr = ui.configbool(b'blackbox', b'debug.to-stderr')
102 if dateformat:
103 if dateformat:
103 date = dateutil.datestr(default, dateformat)
104 date = dateutil.datestr(default, dateformat)
104 else:
105 else:
105 # We want to display milliseconds (more precision seems
106 # We want to display milliseconds (more precision seems
106 # unnecessary). Since %.3f is not supported, use %f and truncate
107 # unnecessary). Since %.3f is not supported, use %f and truncate
107 # microseconds.
108 # microseconds.
108 date = dateutil.datestr(default, b'%Y-%m-%d %H:%M:%S.%f')[:-3]
109 date = dateutil.datestr(default, b'%Y-%m-%d %H:%M:%S.%f')[:-3]
109 user = procutil.getuser()
110 user = procutil.getuser()
110 pid = b'%d' % procutil.getpid()
111 pid = b'%d' % procutil.getpid()
111 changed = b''
112 changed = b''
112 ctx = self._repo[None]
113 ctx = self._repo[None]
113 parents = ctx.parents()
114 parents = ctx.parents()
114 rev = b'+'.join([hex(p.node()) for p in parents])
115 rev = b'+'.join([hex(p.node()) for p in parents])
115 if ui.configbool(b'blackbox', b'dirty') and ctx.dirty(
116 if ui.configbool(b'blackbox', b'dirty') and ctx.dirty(
116 missing=True, merge=False, branch=False
117 missing=True, merge=False, branch=False
117 ):
118 ):
118 changed = b'+'
119 changed = b'+'
119 if ui.configbool(b'blackbox', b'logsource'):
120 if ui.configbool(b'blackbox', b'logsource'):
120 src = b' [%s]' % event
121 src = b' [%s]' % event
121 else:
122 else:
122 src = b''
123 src = b''
123 try:
124 try:
124 fmt = b'%s %s @%s%s (%s)%s> %s'
125 fmt = b'%s %s @%s%s (%s)%s> %s'
125 args = (date, user, rev, changed, pid, src, msg)
126 args = (date, user, rev, changed, pid, src, msg)
126 with loggingutil.openlogfile(
127 with loggingutil.openlogfile(
127 ui,
128 ui,
128 self._repo.vfs,
129 self._repo.vfs,
129 name=b'blackbox.log',
130 name=b'blackbox.log',
130 maxfiles=self._maxfiles,
131 maxfiles=self._maxfiles,
131 maxsize=self._maxsize,
132 maxsize=self._maxsize,
132 ) as fp:
133 ) as fp:
133 fp.write(fmt % args)
134 msg = fmt % args
135 fp.write(msg)
136 if debug_to_stderr:
137 ui.write_err(msg)
134 except (IOError, OSError) as err:
138 except (IOError, OSError) as err:
135 # deactivate event tracking so a logging failure is not retried
139 # deactivate event tracking so a logging failure is not retried
136 self._trackedevents.clear()
140 self._trackedevents.clear()
137 ui.debug(
141 ui.debug(
138 b'warning: cannot write to blackbox.log: %s\n'
142 b'warning: cannot write to blackbox.log: %s\n'
139 % encoding.strtolocal(err.strerror)
143 % encoding.strtolocal(err.strerror)
140 )
144 )
141 return
145 return
142 _lastlogger.logger = self
146 _lastlogger.logger = self
143
147
144
148
145 def uipopulate(ui):
149 def uipopulate(ui):
146 ui.setlogger(b'blackbox', _lastlogger)
150 ui.setlogger(b'blackbox', _lastlogger)
147
151
148
152
149 def reposetup(ui, repo):
153 def reposetup(ui, repo):
150 # During 'hg pull' a httppeer repo is created to represent the remote repo.
154 # During 'hg pull' a httppeer repo is created to represent the remote repo.
151 # It doesn't have a .hg directory to put a blackbox in, so we don't do
155 # It doesn't have a .hg directory to put a blackbox in, so we don't do
152 # the blackbox setup for it.
156 # the blackbox setup for it.
153 if not repo.local():
157 if not repo.local():
154 return
158 return
155
159
156 # Since blackbox.log is stored in the repo directory, the logger should be
160 # Since blackbox.log is stored in the repo directory, the logger should be
157 # instantiated per repository.
161 # instantiated per repository.
158 logger = blackboxlogger(ui, repo)
162 logger = blackboxlogger(ui, repo)
159 ui.setlogger(b'blackbox', logger)
163 ui.setlogger(b'blackbox', logger)
160
164
161 # Set _lastlogger even if ui.log is not called. This gives blackbox a
165 # Set _lastlogger even if ui.log is not called. This gives blackbox a
162 # fallback place to log
166 # fallback place to log
163 if _lastlogger.logger is None:
167 if _lastlogger.logger is None:
164 _lastlogger.logger = logger
168 _lastlogger.logger = logger
165
169
166 repo._wlockfreeprefix.add(b'blackbox.log')
170 repo._wlockfreeprefix.add(b'blackbox.log')
167
171
168
172
169 @command(
173 @command(
170 b'blackbox',
174 b'blackbox',
171 [
175 [
172 (b'l', b'limit', 10, _(b'the number of events to show')),
176 (b'l', b'limit', 10, _(b'the number of events to show')),
173 ],
177 ],
174 _(b'hg blackbox [OPTION]...'),
178 _(b'hg blackbox [OPTION]...'),
175 helpcategory=command.CATEGORY_MAINTENANCE,
179 helpcategory=command.CATEGORY_MAINTENANCE,
176 helpbasic=True,
180 helpbasic=True,
177 )
181 )
178 def blackbox(ui, repo, *revs, **opts):
182 def blackbox(ui, repo, *revs, **opts):
179 """view the recent repository events"""
183 """view the recent repository events"""
180
184
181 if not repo.vfs.exists(b'blackbox.log'):
185 if not repo.vfs.exists(b'blackbox.log'):
182 return
186 return
183
187
184 limit = opts.get('limit')
188 limit = opts.get('limit')
185 assert limit is not None # help pytype
189 assert limit is not None # help pytype
186
190
187 fp = repo.vfs(b'blackbox.log', b'r')
191 fp = repo.vfs(b'blackbox.log', b'r')
188 lines = fp.read().split(b'\n')
192 lines = fp.read().split(b'\n')
189
193
190 count = 0
194 count = 0
191 output = []
195 output = []
192 for line in reversed(lines):
196 for line in reversed(lines):
193 if count >= limit:
197 if count >= limit:
194 break
198 break
195
199
196 # count the commands by matching lines like:
200 # count the commands by matching lines like:
197 # 2013/01/23 19:13:36 root>
201 # 2013/01/23 19:13:36 root>
198 # 2013/01/23 19:13:36 root (1234)>
202 # 2013/01/23 19:13:36 root (1234)>
199 # 2013/01/23 19:13:36 root @0000000000000000000000000000000000000000 (1234)>
203 # 2013/01/23 19:13:36 root @0000000000000000000000000000000000000000 (1234)>
200 # 2013-01-23 19:13:36.000 root @0000000000000000000000000000000000000000 (1234)>
204 # 2013-01-23 19:13:36.000 root @0000000000000000000000000000000000000000 (1234)>
201 if re.match(
205 if re.match(
202 br'^\d{4}[-/]\d{2}[-/]\d{2} \d{2}:\d{2}:\d{2}(.\d*)? .*> .*', line
206 br'^\d{4}[-/]\d{2}[-/]\d{2} \d{2}:\d{2}:\d{2}(.\d*)? .*> .*', line
203 ):
207 ):
204 count += 1
208 count += 1
205 output.append(line)
209 output.append(line)
206
210
207 ui.status(b'\n'.join(reversed(output)))
211 ui.status(b'\n'.join(reversed(output)))
@@ -1,2670 +1,2670 b''
1 # bundle2.py - generic container format to transmit arbitrary data.
1 # bundle2.py - generic container format to transmit arbitrary data.
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """Handling of the new bundle2 format
7 """Handling of the new bundle2 format
8
8
9 The goal of bundle2 is to act as an atomic packet to transmit a set of
9 The goal of bundle2 is to act as an atomic packet to transmit a set of
10 payloads in an application agnostic way. It consists of a sequence of "parts"
10 payloads in an application agnostic way. It consists of a sequence of "parts"
11 that will be handed to and processed by the application layer.
11 that will be handed to and processed by the application layer.
12
12
13
13
14 General format architecture
14 General format architecture
15 ===========================
15 ===========================
16
16
17 The format is structured as follows
17 The format is structured as follows
18
18
19 - magic string
19 - magic string
20 - stream level parameters
20 - stream level parameters
21 - payload parts (any number)
21 - payload parts (any number)
22 - end of stream marker.
22 - end of stream marker.
23
23
24 The binary format
24 The binary format
25 ============================
25 ============================
26
26
27 All numbers are unsigned and big-endian.
27 All numbers are unsigned and big-endian.
28
28
29 stream level parameters
29 stream level parameters
30 ------------------------
30 ------------------------
31
31
32 The binary format is as follows
32 The binary format is as follows
33
33
34 :params size: int32
34 :params size: int32
35
35
36 The total number of Bytes used by the parameters
36 The total number of Bytes used by the parameters
37
37
38 :params value: arbitrary number of Bytes
38 :params value: arbitrary number of Bytes
39
39
40 A blob of `params size` containing the serialized version of all stream level
40 A blob of `params size` containing the serialized version of all stream level
41 parameters.
41 parameters.
42
42
43 The blob contains a space separated list of parameters. Parameters with value
43 The blob contains a space separated list of parameters. Parameters with value
44 are stored in the form `<name>=<value>`. Both name and value are urlquoted.
44 are stored in the form `<name>=<value>`. Both name and value are urlquoted.
45
45
46 Empty names are obviously forbidden.
46 Empty names are obviously forbidden.
47
47
48 Names MUST start with a letter. If this first letter is lower case, the
48 Names MUST start with a letter. If this first letter is lower case, the
49 parameter is advisory and can be safely ignored. However, when the first
49 parameter is advisory and can be safely ignored. However, when the first
50 letter is capital, the parameter is mandatory and the bundling process MUST
50 letter is capital, the parameter is mandatory and the bundling process MUST
51 stop if it is not able to process it.
51 stop if it is not able to process it.
52
52
53 Stream parameters use a simple textual format for two main reasons:
53 Stream parameters use a simple textual format for two main reasons:
54
54
55 - Stream level parameters should remain simple and we want to discourage any
55 - Stream level parameters should remain simple and we want to discourage any
56 crazy usage.
56 crazy usage.
57 - Textual data allow easy human inspection of a bundle2 header in case of
57 - Textual data allow easy human inspection of a bundle2 header in case of
58 troubles.
58 troubles.
59
59
60 Any Applicative level options MUST go into a bundle2 part instead.
60 Any Applicative level options MUST go into a bundle2 part instead.
61
61
62 Payload part
62 Payload part
63 ------------------------
63 ------------------------
64
64
65 The binary format is as follows
65 The binary format is as follows
66
66
67 :header size: int32
67 :header size: int32
68
68
69 The total number of Bytes used by the part header. When the header is empty
69 The total number of Bytes used by the part header. When the header is empty
70 (size = 0) this is interpreted as the end of stream marker.
70 (size = 0) this is interpreted as the end of stream marker.
71
71
72 :header:
72 :header:
73
73
74 The header defines how to interpret the part. It contains two pieces of
74 The header defines how to interpret the part. It contains two pieces of
75 data: the part type and the part parameters.
75 data: the part type and the part parameters.
76
76
77 The part type is used to route to an application level handler that can
77 The part type is used to route to an application level handler that can
78 interpret the payload.
78 interpret the payload.
79
79
80 Part parameters are passed to the application level handler. They are
80 Part parameters are passed to the application level handler. They are
81 meant to convey information that will help the application level object to
81 meant to convey information that will help the application level object to
82 interpret the part payload.
82 interpret the part payload.
83
83
84 The binary format of the header is as follows
84 The binary format of the header is as follows
85
85
86 :typesize: (one byte)
86 :typesize: (one byte)
87
87
88 :parttype: alphanumerical part name (restricted to [a-zA-Z0-9_:-]*)
88 :parttype: alphanumerical part name (restricted to [a-zA-Z0-9_:-]*)
89
89
90 :partid: A 32bits integer (unique in the bundle) that can be used to refer
90 :partid: A 32bits integer (unique in the bundle) that can be used to refer
91 to this part.
91 to this part.
92
92
93 :parameters:
93 :parameters:
94
94
95 Part's parameter may have arbitrary content, the binary structure is::
95 Part's parameter may have arbitrary content, the binary structure is::
96
96
97 <mandatory-count><advisory-count><param-sizes><param-data>
97 <mandatory-count><advisory-count><param-sizes><param-data>
98
98
99 :mandatory-count: 1 byte, number of mandatory parameters
99 :mandatory-count: 1 byte, number of mandatory parameters
100
100
101 :advisory-count: 1 byte, number of advisory parameters
101 :advisory-count: 1 byte, number of advisory parameters
102
102
103 :param-sizes:
103 :param-sizes:
104
104
105 N couples of bytes, where N is the total number of parameters. Each
105 N couples of bytes, where N is the total number of parameters. Each
106 couple contains (<size-of-key>, <size-of-value>) for one parameter.
106 couple contains (<size-of-key>, <size-of-value>) for one parameter.
107
107
108 :param-data:
108 :param-data:
109
109
110 A blob of bytes from which each parameter key and value can be
110 A blob of bytes from which each parameter key and value can be
111 retrieved using the list of size couples stored in the previous
111 retrieved using the list of size couples stored in the previous
112 field.
112 field.
113
113
114 Mandatory parameters come first, then the advisory ones.
114 Mandatory parameters come first, then the advisory ones.
115
115
116 Each parameter's key MUST be unique within the part.
116 Each parameter's key MUST be unique within the part.
117
117
118 :payload:
118 :payload:
119
119
120 payload is a series of `<chunksize><chunkdata>`.
120 payload is a series of `<chunksize><chunkdata>`.
121
121
122 `chunksize` is an int32, `chunkdata` are plain bytes (as much as
122 `chunksize` is an int32, `chunkdata` are plain bytes (as much as
123 `chunksize` says). The payload part is concluded by a zero-size chunk.
123 `chunksize` says). The payload part is concluded by a zero-size chunk.
124
124
125 The current implementation always produces either zero or one chunk.
125 The current implementation always produces either zero or one chunk.
126 This is an implementation limitation that will ultimately be lifted.
126 This is an implementation limitation that will ultimately be lifted.
127
127
128 `chunksize` can be negative to trigger special case processing. No such
128 `chunksize` can be negative to trigger special case processing. No such
129 processing is in place yet.
129 processing is in place yet.
130
130
131 Bundle processing
131 Bundle processing
132 ============================
132 ============================
133
133
134 Each part is processed in order using a "part handler". Handlers are registered
134 Each part is processed in order using a "part handler". Handlers are registered
135 for a certain part type.
135 for a certain part type.
136
136
137 The matching of a part to its handler is case insensitive. The case of the
137 The matching of a part to its handler is case insensitive. The case of the
138 part type is used to know if a part is mandatory or advisory. If the Part type
138 part type is used to know if a part is mandatory or advisory. If the Part type
139 contains any uppercase char it is considered mandatory. When no handler is
139 contains any uppercase char it is considered mandatory. When no handler is
140 known for a Mandatory part, the process is aborted and an exception is raised.
140 known for a Mandatory part, the process is aborted and an exception is raised.
141 If the part is advisory and no handler is known, the part is ignored. When the
141 If the part is advisory and no handler is known, the part is ignored. When the
142 process is aborted, the full bundle is still read from the stream to keep the
142 process is aborted, the full bundle is still read from the stream to keep the
143 channel usable. But none of the parts read after an abort are processed. In the
143 channel usable. But none of the parts read after an abort are processed. In the
144 future, dropping the stream may become an option for channels we do not care to
144 future, dropping the stream may become an option for channels we do not care to
145 preserve.
145 preserve.
146 """
146 """
147
147
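A minimal sketch of the stream-level parameter framing described above, written in plain Python 3 rather than the bytes-centric helpers this module uses (illustrative only; the real writer lives in the bundler classes below):

    import struct
    import urllib.parse

    def encodestreamparams(params):
        # params: mapping of name -> value, value None for valueless parameters
        chunks = []
        for name, value in sorted(params.items()):
            p = urllib.parse.quote(name)
            if value is not None:
                p += '=' + urllib.parse.quote(value)
            chunks.append(p)
        blob = ' '.join(chunks).encode('ascii')
        # int32 size prefix (big-endian, matching _fstreamparamsize below),
        # followed by the space-separated, urlquoted blob itself
        return struct.pack('>i', len(blob)) + blob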
148
148
149 import collections
149 import collections
150 import errno
150 import errno
151 import os
151 import os
152 import re
152 import re
153 import string
153 import string
154 import struct
154 import struct
155 import sys
155 import sys
156
156
157 from .i18n import _
157 from .i18n import _
158 from .node import (
158 from .node import (
159 hex,
159 hex,
160 short,
160 short,
161 )
161 )
162 from . import (
162 from . import (
163 bookmarks,
163 bookmarks,
164 changegroup,
164 changegroup,
165 encoding,
165 encoding,
166 error,
166 error,
167 obsolete,
167 obsolete,
168 phases,
168 phases,
169 pushkey,
169 pushkey,
170 pycompat,
170 pycompat,
171 requirements,
171 requirements,
172 scmutil,
172 scmutil,
173 streamclone,
173 streamclone,
174 tags,
174 tags,
175 url,
175 url,
176 util,
176 util,
177 )
177 )
178 from .utils import (
178 from .utils import (
179 stringutil,
179 stringutil,
180 urlutil,
180 urlutil,
181 )
181 )
182 from .interfaces import repository
182 from .interfaces import repository
183
183
184 urlerr = util.urlerr
184 urlerr = util.urlerr
185 urlreq = util.urlreq
185 urlreq = util.urlreq
186
186
187 _pack = struct.pack
187 _pack = struct.pack
188 _unpack = struct.unpack
188 _unpack = struct.unpack
189
189
190 _fstreamparamsize = b'>i'
190 _fstreamparamsize = b'>i'
191 _fpartheadersize = b'>i'
191 _fpartheadersize = b'>i'
192 _fparttypesize = b'>B'
192 _fparttypesize = b'>B'
193 _fpartid = b'>I'
193 _fpartid = b'>I'
194 _fpayloadsize = b'>i'
194 _fpayloadsize = b'>i'
195 _fpartparamcount = b'>BB'
195 _fpartparamcount = b'>BB'
196
196
197 preferedchunksize = 32768
197 preferedchunksize = 32768
198
198
199 _parttypeforbidden = re.compile(b'[^a-zA-Z0-9_:-]')
199 _parttypeforbidden = re.compile(b'[^a-zA-Z0-9_:-]')
200
200
201
201
202 def outdebug(ui, message):
202 def outdebug(ui, message):
203 """debug regarding output stream (bundling)"""
203 """debug regarding output stream (bundling)"""
204 if ui.configbool(b'devel', b'bundle2.debug'):
204 if ui.configbool(b'devel', b'bundle2.debug'):
205 ui.debug(b'bundle2-output: %s\n' % message)
205 ui.debug(b'bundle2-output: %s\n' % message)
206
206
207
207
208 def indebug(ui, message):
208 def indebug(ui, message):
209 """debug on input stream (unbundling)"""
209 """debug on input stream (unbundling)"""
210 if ui.configbool(b'devel', b'bundle2.debug'):
210 if ui.configbool(b'devel', b'bundle2.debug'):
211 ui.debug(b'bundle2-input: %s\n' % message)
211 ui.debug(b'bundle2-input: %s\n' % message)
212
212
213
213
214 def validateparttype(parttype):
214 def validateparttype(parttype):
215 """raise ValueError if a parttype contains invalid character"""
215 """raise ValueError if a parttype contains invalid character"""
216 if _parttypeforbidden.search(parttype):
216 if _parttypeforbidden.search(parttype):
217 raise ValueError(parttype)
217 raise ValueError(parttype)
218
218
219
219
220 def _makefpartparamsizes(nbparams):
220 def _makefpartparamsizes(nbparams):
221 """return a struct format to read part parameter sizes
221 """return a struct format to read part parameter sizes
222
222
223 The number of parameters is variable so we need to build that format
223 The number of parameters is variable so we need to build that format
224 dynamically.
224 dynamically.
225 """
225 """
226 return b'>' + (b'BB' * nbparams)
226 return b'>' + (b'BB' * nbparams)
227
227
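For example, with two parameters _makefpartparamsizes(2) yields b'>BBBB'; unpacking four header bytes with it gives the interleaved key/value sizes (illustrative bytes):

    fmt = _makefpartparamsizes(2)                  # b'>BBBB'
    sizes = struct.unpack(fmt, b'\x03\x05\x02\x00')
    # sizes == (3, 5, 2, 0): a 3-byte key with a 5-byte value, then a
    # 2-byte key whose value is empty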
228
228
229 parthandlermapping = {}
229 parthandlermapping = {}
230
230
231
231
232 def parthandler(parttype, params=()):
232 def parthandler(parttype, params=()):
233 """decorator that register a function as a bundle2 part handler
233 """decorator that register a function as a bundle2 part handler
234
234
235 eg::
235 eg::
236
236
237 @parthandler('myparttype', ('mandatory', 'param', 'handled'))
237 @parthandler('myparttype', ('mandatory', 'param', 'handled'))
238 def myparttypehandler(...):
238 def myparttypehandler(...):
239 '''process a part of type "my part".'''
239 '''process a part of type "my part".'''
240 ...
240 ...
241 """
241 """
242 validateparttype(parttype)
242 validateparttype(parttype)
243
243
244 def _decorator(func):
244 def _decorator(func):
245 lparttype = parttype.lower() # enforce lower case matching.
245 lparttype = parttype.lower() # enforce lower case matching.
246 assert lparttype not in parthandlermapping
246 assert lparttype not in parthandlermapping
247 parthandlermapping[lparttype] = func
247 parthandlermapping[lparttype] = func
248 func.params = frozenset(params)
248 func.params = frozenset(params)
249 return func
249 return func
250
250
251 return _decorator
251 return _decorator
252
252
253
253
254 class unbundlerecords:
254 class unbundlerecords:
255 """keep record of what happens during and unbundle
255 """keep record of what happens during and unbundle
256
256
257 New records are added using `records.add('cat', obj)`. Where 'cat' is a
257 New records are added using `records.add('cat', obj)`. Where 'cat' is a
258 category of record and obj is an arbitrary object.
258 category of record and obj is an arbitrary object.
259
259
260 `records['cat']` will return all entries of this category 'cat'.
260 `records['cat']` will return all entries of this category 'cat'.
261
261
262 Iterating on the object itself will yield `('category', obj)` tuples
262 Iterating on the object itself will yield `('category', obj)` tuples
263 for all entries.
263 for all entries.
264
264
265 All iterations happen in chronological order.
265 All iterations happen in chronological order.
266 """
266 """
267
267
268 def __init__(self):
268 def __init__(self):
269 self._categories = {}
269 self._categories = {}
270 self._sequences = []
270 self._sequences = []
271 self._replies = {}
271 self._replies = {}
272
272
273 def add(self, category, entry, inreplyto=None):
273 def add(self, category, entry, inreplyto=None):
274 """add a new record of a given category.
274 """add a new record of a given category.
275
275
276 The entry can then be retrieved in the list returned by
276 The entry can then be retrieved in the list returned by
277 self['category']."""
277 self['category']."""
278 self._categories.setdefault(category, []).append(entry)
278 self._categories.setdefault(category, []).append(entry)
279 self._sequences.append((category, entry))
279 self._sequences.append((category, entry))
280 if inreplyto is not None:
280 if inreplyto is not None:
281 self.getreplies(inreplyto).add(category, entry)
281 self.getreplies(inreplyto).add(category, entry)
282
282
283 def getreplies(self, partid):
283 def getreplies(self, partid):
284 """get the records that are replies to a specific part"""
284 """get the records that are replies to a specific part"""
285 return self._replies.setdefault(partid, unbundlerecords())
285 return self._replies.setdefault(partid, unbundlerecords())
286
286
287 def __getitem__(self, cat):
287 def __getitem__(self, cat):
288 return tuple(self._categories.get(cat, ()))
288 return tuple(self._categories.get(cat, ()))
289
289
290 def __iter__(self):
290 def __iter__(self):
291 return iter(self._sequences)
291 return iter(self._sequences)
292
292
293 def __len__(self):
293 def __len__(self):
294 return len(self._sequences)
294 return len(self._sequences)
295
295
296 def __nonzero__(self):
296 def __nonzero__(self):
297 return bool(self._sequences)
297 return bool(self._sequences)
298
298
299 __bool__ = __nonzero__
299 __bool__ = __nonzero__
300
300
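A small usage sketch of the record API documented above (category names are hypothetical):

    recs = unbundlerecords()
    recs.add(b'changegroup', {b'return': 1})
    recs.add(b'pushkey', {b'namespace': b'phases'})
    recs[b'changegroup']      # -> ({b'return': 1},)
    for category, entry in recs:
        pass                  # yields ('category', obj) pairs, oldest first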
301
301
302 class bundleoperation:
302 class bundleoperation:
303 """an object that represents a single bundling process
303 """an object that represents a single bundling process
304
304
305 Its purpose is to carry unbundle-related objects and states.
305 Its purpose is to carry unbundle-related objects and states.
306
306
307 A new object should be created at the beginning of each bundle processing.
307 A new object should be created at the beginning of each bundle processing.
308 The object is to be returned by the processing function.
308 The object is to be returned by the processing function.
309
309
310 The object has very little content now; it will ultimately contain:
310 The object has very little content now; it will ultimately contain:
311 * an access to the repo the bundle is applied to,
311 * an access to the repo the bundle is applied to,
312 * a ui object,
312 * a ui object,
313 * a way to retrieve a transaction to add changes to the repo,
313 * a way to retrieve a transaction to add changes to the repo,
314 * a way to record the result of processing each part,
314 * a way to record the result of processing each part,
315 * a way to construct a bundle response when applicable.
315 * a way to construct a bundle response when applicable.
316 """
316 """
317
317
318 def __init__(
318 def __init__(
319 self,
319 self,
320 repo,
320 repo,
321 transactiongetter,
321 transactiongetter,
322 captureoutput=True,
322 captureoutput=True,
323 source=b'',
323 source=b'',
324 remote=None,
324 remote=None,
325 ):
325 ):
326 self.repo = repo
326 self.repo = repo
327 # the peer object who produced this bundle if available
327 # the peer object who produced this bundle if available
328 self.remote = remote
328 self.remote = remote
329 self.ui = repo.ui
329 self.ui = repo.ui
330 self.records = unbundlerecords()
330 self.records = unbundlerecords()
331 self.reply = None
331 self.reply = None
332 self.captureoutput = captureoutput
332 self.captureoutput = captureoutput
333 self.hookargs = {}
333 self.hookargs = {}
334 self._gettransaction = transactiongetter
334 self._gettransaction = transactiongetter
335 # carries value that can modify part behavior
335 # carries value that can modify part behavior
336 self.modes = {}
336 self.modes = {}
337 self.source = source
337 self.source = source
338
338
339 def gettransaction(self):
339 def gettransaction(self):
340 transaction = self._gettransaction()
340 transaction = self._gettransaction()
341
341
342 if self.hookargs:
342 if self.hookargs:
343 # the ones added to the transaction supersede those added
343 # the ones added to the transaction supersede those added
344 # to the operation.
344 # to the operation.
345 self.hookargs.update(transaction.hookargs)
345 self.hookargs.update(transaction.hookargs)
346 transaction.hookargs = self.hookargs
346 transaction.hookargs = self.hookargs
347
347
348 # mark the hookargs as flushed. further attempts to add to
348 # mark the hookargs as flushed. further attempts to add to
349 # hookargs will result in an abort.
349 # hookargs will result in an abort.
350 self.hookargs = None
350 self.hookargs = None
351
351
352 return transaction
352 return transaction
353
353
354 def addhookargs(self, hookargs):
354 def addhookargs(self, hookargs):
355 if self.hookargs is None:
355 if self.hookargs is None:
356 raise error.ProgrammingError(
356 raise error.ProgrammingError(
357 b'attempted to add hookargs to '
357 b'attempted to add hookargs to '
358 b'operation after transaction started'
358 b'operation after transaction started'
359 )
359 )
360 self.hookargs.update(hookargs)
360 self.hookargs.update(hookargs)
361
361
362
362
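# Illustration (editor's sketch, not part of the module): the merge order
# implemented by ``bundleoperation.gettransaction`` above. Operation-level
# hookargs come first and the transaction's own hookargs supersede them;
# plain dicts stand in for the real objects here.
op_hookargs = {b'source': b'push', b'bundle2': b'1'}
tr_hookargs = {b'source': b'serve'}
op_hookargs.update(tr_hookargs)  # transaction values win on conflicts
assert op_hookargs == {b'source': b'serve', b'bundle2': b'1'}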
363 class TransactionUnavailable(RuntimeError):
363 class TransactionUnavailable(RuntimeError):
364 pass
364 pass
365
365
366
366
367 def _notransaction():
367 def _notransaction():
368 """default method to get a transaction while processing a bundle
368 """default method to get a transaction while processing a bundle
369
369
370 Raise an exception to highlight the fact that no transaction was expected
370 Raise an exception to highlight the fact that no transaction was expected
371 to be created"""
371 to be created"""
372 raise TransactionUnavailable()
372 raise TransactionUnavailable()
373
373
374
374
375 def applybundle(repo, unbundler, tr, source, url=None, remote=None, **kwargs):
375 def applybundle(repo, unbundler, tr, source, url=None, remote=None, **kwargs):
376 # transform me into unbundler.apply() as soon as the freeze is lifted
376 # transform me into unbundler.apply() as soon as the freeze is lifted
377 if isinstance(unbundler, unbundle20):
377 if isinstance(unbundler, unbundle20):
378 tr.hookargs[b'bundle2'] = b'1'
378 tr.hookargs[b'bundle2'] = b'1'
379 if source is not None and b'source' not in tr.hookargs:
379 if source is not None and b'source' not in tr.hookargs:
380 tr.hookargs[b'source'] = source
380 tr.hookargs[b'source'] = source
381 if url is not None and b'url' not in tr.hookargs:
381 if url is not None and b'url' not in tr.hookargs:
382 tr.hookargs[b'url'] = url
382 tr.hookargs[b'url'] = url
383 return processbundle(
383 return processbundle(
384 repo, unbundler, lambda: tr, source=source, remote=remote
384 repo, unbundler, lambda: tr, source=source, remote=remote
385 )
385 )
386 else:
386 else:
387 # the transactiongetter won't be used, but we might as well set it
387 # the transactiongetter won't be used, but we might as well set it
388 op = bundleoperation(repo, lambda: tr, source=source, remote=remote)
388 op = bundleoperation(repo, lambda: tr, source=source, remote=remote)
389 _processchangegroup(op, unbundler, tr, source, url, **kwargs)
389 _processchangegroup(op, unbundler, tr, source, url, **kwargs)
390 return op
390 return op
391
391
392
392
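# Minimal usage sketch for ``applybundle`` (assumes ``repo`` and an
# ``unbundler`` obtained from ``getunbundler`` below; the source and url
# values are illustrative). The caller owns the transaction.
with repo.transaction(b'unbundle') as tr:
    op = applybundle(
        repo, unbundler, tr, source=b'push', url=b'bundle:example.hg'
    )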
393 class partiterator:
393 class partiterator:
394 def __init__(self, repo, op, unbundler):
394 def __init__(self, repo, op, unbundler):
395 self.repo = repo
395 self.repo = repo
396 self.op = op
396 self.op = op
397 self.unbundler = unbundler
397 self.unbundler = unbundler
398 self.iterator = None
398 self.iterator = None
399 self.count = 0
399 self.count = 0
400 self.current = None
400 self.current = None
401
401
402 def __enter__(self):
402 def __enter__(self):
403 def func():
403 def func():
404 itr = enumerate(self.unbundler.iterparts(), 1)
404 itr = enumerate(self.unbundler.iterparts(), 1)
405 for count, p in itr:
405 for count, p in itr:
406 self.count = count
406 self.count = count
407 self.current = p
407 self.current = p
408 yield p
408 yield p
409 p.consume()
409 p.consume()
410 self.current = None
410 self.current = None
411
411
412 self.iterator = func()
412 self.iterator = func()
413 return self.iterator
413 return self.iterator
414
414
415 def __exit__(self, type, exc, tb):
415 def __exit__(self, type, exc, tb):
416 if not self.iterator:
416 if not self.iterator:
417 return
417 return
418
418
419 # Only gracefully abort in a normal exception situation. User aborts
419 # Only gracefully abort in a normal exception situation. User aborts
420 # like Ctrl+C throw a KeyboardInterrupt, which is not an Exception subclass,
420 # like Ctrl+C throw a KeyboardInterrupt, which is not an Exception subclass,
421 # and should not trigger graceful cleanup.
421 # and should not trigger graceful cleanup.
422 if isinstance(exc, Exception):
422 if isinstance(exc, Exception):
423 # Any exceptions seeking to the end of the bundle at this point are
423 # Any exceptions seeking to the end of the bundle at this point are
424 # almost certainly related to the underlying stream being bad.
424 # almost certainly related to the underlying stream being bad.
425 # And, chances are that the exception we're handling is related to
425 # And, chances are that the exception we're handling is related to
426 # getting in that bad state. So, we swallow the seeking error and
426 # getting in that bad state. So, we swallow the seeking error and
427 # re-raise the original error.
427 # re-raise the original error.
428 seekerror = False
428 seekerror = False
429 try:
429 try:
430 if self.current:
430 if self.current:
431 # consume the part content to not corrupt the stream.
431 # consume the part content to not corrupt the stream.
432 self.current.consume()
432 self.current.consume()
433
433
434 for part in self.iterator:
434 for part in self.iterator:
435 # consume the bundle content
435 # consume the bundle content
436 part.consume()
436 part.consume()
437 except Exception:
437 except Exception:
438 seekerror = True
438 seekerror = True
439
439
440 # Small hack to let caller code distinguish exceptions from bundle2
440 # Small hack to let caller code distinguish exceptions from bundle2
441 # processing from processing the old format. This is mostly needed
441 # processing from processing the old format. This is mostly needed
442 # to handle different return codes to unbundle according to the type
442 # to handle different return codes to unbundle according to the type
443 # of bundle. We should probably clean up or drop this return code
443 # of bundle. We should probably clean up or drop this return code
444 # craziness in a future version.
444 # craziness in a future version.
445 exc.duringunbundle2 = True
445 exc.duringunbundle2 = True
446 salvaged = []
446 salvaged = []
447 replycaps = None
447 replycaps = None
448 if self.op.reply is not None:
448 if self.op.reply is not None:
449 salvaged = self.op.reply.salvageoutput()
449 salvaged = self.op.reply.salvageoutput()
450 replycaps = self.op.reply.capabilities
450 replycaps = self.op.reply.capabilities
451 exc._replycaps = replycaps
451 exc._replycaps = replycaps
452 exc._bundle2salvagedoutput = salvaged
452 exc._bundle2salvagedoutput = salvaged
453
453
454 # Re-raising from a variable loses the original stack. So only use
454 # Re-raising from a variable loses the original stack. So only use
455 # that form if we need to.
455 # that form if we need to.
456 if seekerror:
456 if seekerror:
457 raise exc
457 raise exc
458
458
459 self.repo.ui.debug(
459 self.repo.ui.debug(
460 b'bundle2-input-bundle: %i parts total\n' % self.count
460 b'bundle2-input-bundle: %i parts total\n' % self.count
461 )
461 )
462
462
463
463
464 def processbundle(
464 def processbundle(
465 repo,
465 repo,
466 unbundler,
466 unbundler,
467 transactiongetter=None,
467 transactiongetter=None,
468 op=None,
468 op=None,
469 source=b'',
469 source=b'',
470 remote=None,
470 remote=None,
471 ):
471 ):
472 """This function process a bundle, apply effect to/from a repo
472 """This function process a bundle, apply effect to/from a repo
473
473
474 It iterates over each part then searches for and uses the proper handling
474 It iterates over each part then searches for and uses the proper handling
475 code to process the part. Parts are processed in order.
475 code to process the part. Parts are processed in order.
476
476
477 An unknown mandatory part will abort the process.
477 An unknown mandatory part will abort the process.
478
478
479 It is temporarily possible to provide a prebuilt bundleoperation to the
479 It is temporarily possible to provide a prebuilt bundleoperation to the
480 function. This is used to ensure output is properly propagated in case of
480 function. This is used to ensure output is properly propagated in case of
481 an error during the unbundling. This output capturing part will likely be
481 an error during the unbundling. This output capturing part will likely be
482 reworked and this ability will probably go away in the process.
482 reworked and this ability will probably go away in the process.
483 """
483 """
484 if op is None:
484 if op is None:
485 if transactiongetter is None:
485 if transactiongetter is None:
486 transactiongetter = _notransaction
486 transactiongetter = _notransaction
487 op = bundleoperation(
487 op = bundleoperation(
488 repo,
488 repo,
489 transactiongetter,
489 transactiongetter,
490 source=source,
490 source=source,
491 remote=remote,
491 remote=remote,
492 )
492 )
493 # todo:
493 # todo:
494 # - replace this with an init function soon.
494 # - replace this with an init function soon.
495 # - exception catching
495 # - exception catching
496 unbundler.params # force loading of the stream-level parameters
496 unbundler.params # force loading of the stream-level parameters
497 if repo.ui.debugflag:
497 if repo.ui.debugflag:
498 msg = [b'bundle2-input-bundle:']
498 msg = [b'bundle2-input-bundle:']
499 if unbundler.params:
499 if unbundler.params:
500 msg.append(b' %i params' % len(unbundler.params))
500 msg.append(b' %i params' % len(unbundler.params))
501 if op._gettransaction is None or op._gettransaction is _notransaction:
501 if op._gettransaction is None or op._gettransaction is _notransaction:
502 msg.append(b' no-transaction')
502 msg.append(b' no-transaction')
503 else:
503 else:
504 msg.append(b' with-transaction')
504 msg.append(b' with-transaction')
505 msg.append(b'\n')
505 msg.append(b'\n')
506 repo.ui.debug(b''.join(msg))
506 repo.ui.debug(b''.join(msg))
507
507
508 processparts(repo, op, unbundler)
508 processparts(repo, op, unbundler)
509
509
510 return op
510 return op
511
511
512
512
513 def processparts(repo, op, unbundler):
513 def processparts(repo, op, unbundler):
514 with partiterator(repo, op, unbundler) as parts:
514 with partiterator(repo, op, unbundler) as parts:
515 for part in parts:
515 for part in parts:
516 _processpart(op, part)
516 _processpart(op, part)
517
517
518
518
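# Sketch: driving ``processbundle`` with no ``transactiongetter``. It then
# falls back to ``_notransaction``, so any part handler requesting a
# transaction raises TransactionUnavailable; this suits read-only bundles
# such as server replies. ``repo`` and ``unbundler`` are assumed to exist.
op = processbundle(repo, unbundler, source=b'pull')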
519 def _processchangegroup(op, cg, tr, source, url, **kwargs):
519 def _processchangegroup(op, cg, tr, source, url, **kwargs):
520 if op.remote is not None and op.remote.path is not None:
520 if op.remote is not None and op.remote.path is not None:
521 remote_path = op.remote.path
521 remote_path = op.remote.path
522 kwargs = kwargs.copy()
522 kwargs = kwargs.copy()
523 kwargs['delta_base_reuse_policy'] = remote_path.delta_reuse_policy
523 kwargs['delta_base_reuse_policy'] = remote_path.delta_reuse_policy
524 ret = cg.apply(op.repo, tr, source, url, **kwargs)
524 ret = cg.apply(op.repo, tr, source, url, **kwargs)
525 op.records.add(
525 op.records.add(
526 b'changegroup',
526 b'changegroup',
527 {
527 {
528 b'return': ret,
528 b'return': ret,
529 },
529 },
530 )
530 )
531 return ret
531 return ret
532
532
533
533
534 def _gethandler(op, part):
534 def _gethandler(op, part):
535 status = b'unknown' # used by debug output
535 status = b'unknown' # used by debug output
536 try:
536 try:
537 handler = parthandlermapping.get(part.type)
537 handler = parthandlermapping.get(part.type)
538 if handler is None:
538 if handler is None:
539 status = b'unsupported-type'
539 status = b'unsupported-type'
540 raise error.BundleUnknownFeatureError(parttype=part.type)
540 raise error.BundleUnknownFeatureError(parttype=part.type)
541 indebug(op.ui, b'found a handler for part %s' % part.type)
541 indebug(op.ui, b'found a handler for part %s' % part.type)
542 unknownparams = part.mandatorykeys - handler.params
542 unknownparams = part.mandatorykeys - handler.params
543 if unknownparams:
543 if unknownparams:
544 unknownparams = list(unknownparams)
544 unknownparams = list(unknownparams)
545 unknownparams.sort()
545 unknownparams.sort()
546 status = b'unsupported-params (%s)' % b', '.join(unknownparams)
546 status = b'unsupported-params (%s)' % b', '.join(unknownparams)
547 raise error.BundleUnknownFeatureError(
547 raise error.BundleUnknownFeatureError(
548 parttype=part.type, params=unknownparams
548 parttype=part.type, params=unknownparams
549 )
549 )
550 status = b'supported'
550 status = b'supported'
551 except error.BundleUnknownFeatureError as exc:
551 except error.BundleUnknownFeatureError as exc:
552 if part.mandatory: # mandatory parts
552 if part.mandatory: # mandatory parts
553 raise
553 raise
554 indebug(op.ui, b'ignoring unsupported advisory part %s' % exc)
554 indebug(op.ui, b'ignoring unsupported advisory part %s' % exc)
555 return # no handler; this advisory part will be skipped
555 return # no handler; this advisory part will be skipped
556 finally:
556 finally:
557 if op.ui.debugflag:
557 if op.ui.debugflag:
558 msg = [b'bundle2-input-part: "%s"' % part.type]
558 msg = [b'bundle2-input-part: "%s"' % part.type]
559 if not part.mandatory:
559 if not part.mandatory:
560 msg.append(b' (advisory)')
560 msg.append(b' (advisory)')
561 nbmp = len(part.mandatorykeys)
561 nbmp = len(part.mandatorykeys)
562 nbap = len(part.params) - nbmp
562 nbap = len(part.params) - nbmp
563 if nbmp or nbap:
563 if nbmp or nbap:
564 msg.append(b' (params:')
564 msg.append(b' (params:')
565 if nbmp:
565 if nbmp:
566 msg.append(b' %i mandatory' % nbmp)
566 msg.append(b' %i mandatory' % nbmp)
567 if nbap:
567 if nbap:
568 msg.append(b' %i advisory' % nbap)
568 msg.append(b' %i advisory' % nbap)
569 msg.append(b')')
569 msg.append(b')')
570 msg.append(b' %s\n' % status)
570 msg.append(b' %s\n' % status)
571 op.ui.debug(b''.join(msg))
571 op.ui.debug(b''.join(msg))
572
572
573 return handler
573 return handler
574
574
575
575
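# Sketch of the registration side consulted by ``_gethandler`` above (the
# ``parthandler`` decorator, defined later in this module, fills
# ``parthandlermapping`` and advertises the handled ``params``). The part
# type and parameter name are purely illustrative.
@parthandler(b'example:noop', (b'reason',))
def handlenoop(op, inpart):
    indebug(op.ui, b'noop part received')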
576 def _processpart(op, part):
576 def _processpart(op, part):
577 """process a single part from a bundle
577 """process a single part from a bundle
578
578
579 The part is guaranteed to have been fully consumed when the function exits
579 The part is guaranteed to have been fully consumed when the function exits
580 (even if an exception is raised)."""
580 (even if an exception is raised)."""
581 handler = _gethandler(op, part)
581 handler = _gethandler(op, part)
582 if handler is None:
582 if handler is None:
583 return
583 return
584
584
585 # handler is called outside the above try block so that we don't
585 # handler is called outside the above try block so that we don't
586 # risk catching KeyErrors from anything other than the
586 # risk catching KeyErrors from anything other than the
587 # parthandlermapping lookup (any KeyError raised by handler()
587 # parthandlermapping lookup (any KeyError raised by handler()
588 # itself represents a defect of a different variety).
588 # itself represents a defect of a different variety).
589 output = None
589 output = None
590 if op.captureoutput and op.reply is not None:
590 if op.captureoutput and op.reply is not None:
591 op.ui.pushbuffer(error=True, subproc=True)
591 op.ui.pushbuffer(error=True, subproc=True)
592 output = b''
592 output = b''
593 try:
593 try:
594 handler(op, part)
594 handler(op, part)
595 finally:
595 finally:
596 if output is not None:
596 if output is not None:
597 output = op.ui.popbuffer()
597 output = op.ui.popbuffer()
598 if output:
598 if output:
599 outpart = op.reply.newpart(b'output', data=output, mandatory=False)
599 outpart = op.reply.newpart(b'output', data=output, mandatory=False)
600 outpart.addparam(
600 outpart.addparam(
601 b'in-reply-to', pycompat.bytestr(part.id), mandatory=False
601 b'in-reply-to', pycompat.bytestr(part.id), mandatory=False
602 )
602 )
603
603
604
604
605 def decodecaps(blob):
605 def decodecaps(blob):
606 """decode a bundle2 caps bytes blob into a dictionary
606 """decode a bundle2 caps bytes blob into a dictionary
607
607
608 The blob is a list of capabilities (one per line).
608 The blob is a list of capabilities (one per line).
609 Capabilities may have values using a line of the form::
609 Capabilities may have values using a line of the form::
610
610
611 capability=value1,value2,value3
611 capability=value1,value2,value3
612
612
613 The values are always a list."""
613 The values are always a list."""
614 caps = {}
614 caps = {}
615 for line in blob.splitlines():
615 for line in blob.splitlines():
616 if not line:
616 if not line:
617 continue
617 continue
618 if b'=' not in line:
618 if b'=' not in line:
619 key, vals = line, ()
619 key, vals = line, ()
620 else:
620 else:
621 key, vals = line.split(b'=', 1)
621 key, vals = line.split(b'=', 1)
622 vals = vals.split(b',')
622 vals = vals.split(b',')
623 key = urlreq.unquote(key)
623 key = urlreq.unquote(key)
624 vals = [urlreq.unquote(v) for v in vals]
624 vals = [urlreq.unquote(v) for v in vals]
625 caps[key] = vals
625 caps[key] = vals
626 return caps
626 return caps
627
627
628
628
629 def encodecaps(caps):
629 def encodecaps(caps):
630 """encode a bundle2 caps dictionary into a bytes blob"""
630 """encode a bundle2 caps dictionary into a bytes blob"""
631 chunks = []
631 chunks = []
632 for ca in sorted(caps):
632 for ca in sorted(caps):
633 vals = caps[ca]
633 vals = caps[ca]
634 ca = urlreq.quote(ca)
634 ca = urlreq.quote(ca)
635 vals = [urlreq.quote(v) for v in vals]
635 vals = [urlreq.quote(v) for v in vals]
636 if vals:
636 if vals:
637 ca = b"%s=%s" % (ca, b','.join(vals))
637 ca = b"%s=%s" % (ca, b','.join(vals))
638 chunks.append(ca)
638 chunks.append(ca)
639 return b'\n'.join(chunks)
639 return b'\n'.join(chunks)
640
640
641
641
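# Round-trip illustration for the two helpers above: entries are
# percent-quoted individually, and a capability without '=' decodes to an
# empty value list.
blob = encodecaps({b'HG20': [], b'changegroup': [b'01', b'02']})
assert blob == b'HG20\nchangegroup=01,02'
assert decodecaps(blob) == {b'HG20': [], b'changegroup': [b'01', b'02']}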
642 bundletypes = {
642 bundletypes = {
643 b"": (b"", b'UN'), # only when using unbundle on ssh and old http servers
643 b"": (b"", b'UN'), # only when using unbundle on ssh and old http servers
644 # since the unification ssh accepts a header but there
644 # since the unification ssh accepts a header but there
645 # is no capability signaling it.
645 # is no capability signaling it.
646 b"HG20": (), # special-cased below
646 b"HG20": (), # special-cased below
647 b"HG10UN": (b"HG10UN", b'UN'),
647 b"HG10UN": (b"HG10UN", b'UN'),
648 b"HG10BZ": (b"HG10", b'BZ'),
648 b"HG10BZ": (b"HG10", b'BZ'),
649 b"HG10GZ": (b"HG10GZ", b'GZ'),
649 b"HG10GZ": (b"HG10GZ", b'GZ'),
650 }
650 }
651
651
652 # hgweb uses this list to communicate its preferred type
652 # hgweb uses this list to communicate its preferred type
653 bundlepriority = [b'HG10GZ', b'HG10BZ', b'HG10UN']
653 bundlepriority = [b'HG10GZ', b'HG10BZ', b'HG10UN']
654
654
655
655
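# How to read the legacy table above: each value is an (on-wire header,
# compression) pair, so HG10BZ bundles carry a bare b"HG10" header with a
# bzip2-compressed payload, while HG20 is handled by bundle2 itself.
header, comp = bundletypes[b'HG10BZ']
assert (header, comp) == (b'HG10', b'BZ')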
656 class bundle20:
656 class bundle20:
657 """represent an outgoing bundle2 container
657 """represent an outgoing bundle2 container
658
658
659 Use the `addparam` method to add stream level parameters and `newpart` to
659 Use the `addparam` method to add stream level parameters and `newpart` to
660 populate it. Then call `getchunks` to retrieve all the binary chunks of
660 populate it. Then call `getchunks` to retrieve all the binary chunks of
661 data that compose the bundle2 container."""
661 data that compose the bundle2 container."""
662
662
663 _magicstring = b'HG20'
663 _magicstring = b'HG20'
664
664
665 def __init__(self, ui, capabilities=()):
665 def __init__(self, ui, capabilities=()):
666 self.ui = ui
666 self.ui = ui
667 self._params = []
667 self._params = []
668 self._parts = []
668 self._parts = []
669 self.capabilities = dict(capabilities)
669 self.capabilities = dict(capabilities)
670 self._compengine = util.compengines.forbundletype(b'UN')
670 self._compengine = util.compengines.forbundletype(b'UN')
671 self._compopts = None
671 self._compopts = None
672 # If compression is being handled by a consumer of the raw
672 # If compression is being handled by a consumer of the raw
673 # data (e.g. the wire protocol), unsetting this flag tells
673 # data (e.g. the wire protocol), unsetting this flag tells
674 # consumers that the bundle is best left uncompressed.
674 # consumers that the bundle is best left uncompressed.
675 self.prefercompressed = True
675 self.prefercompressed = True
676
676
677 def setcompression(self, alg, compopts=None):
677 def setcompression(self, alg, compopts=None):
678 """setup core part compression to <alg>"""
678 """setup core part compression to <alg>"""
679 if alg in (None, b'UN'):
679 if alg in (None, b'UN'):
680 return
680 return
681 assert not any(n.lower() == b'compression' for n, v in self._params)
681 assert not any(n.lower() == b'compression' for n, v in self._params)
682 self.addparam(b'Compression', alg)
682 self.addparam(b'Compression', alg)
683 self._compengine = util.compengines.forbundletype(alg)
683 self._compengine = util.compengines.forbundletype(alg)
684 self._compopts = compopts
684 self._compopts = compopts
685
685
686 @property
686 @property
687 def nbparts(self):
687 def nbparts(self):
688 """total number of parts added to the bundler"""
688 """total number of parts added to the bundler"""
689 return len(self._parts)
689 return len(self._parts)
690
690
691 # methods used to define the bundle2 content
691 # methods used to define the bundle2 content
692 def addparam(self, name, value=None):
692 def addparam(self, name, value=None):
693 """add a stream level parameter"""
693 """add a stream level parameter"""
694 if not name:
694 if not name:
695 raise error.ProgrammingError(b'empty parameter name')
695 raise error.ProgrammingError(b'empty parameter name')
696 if name[0:1] not in pycompat.bytestr(
696 if name[0:1] not in pycompat.bytestr(
697 string.ascii_letters # pytype: disable=wrong-arg-types
697 string.ascii_letters # pytype: disable=wrong-arg-types
698 ):
698 ):
699 raise error.ProgrammingError(
699 raise error.ProgrammingError(
700 b'non letter first character: %s' % name
700 b'non letter first character: %s' % name
701 )
701 )
702 self._params.append((name, value))
702 self._params.append((name, value))
703
703
704 def addpart(self, part):
704 def addpart(self, part):
705 """add a new part to the bundle2 container
705 """add a new part to the bundle2 container
706
706
707 Parts contain the actual applicative payload."""
707 Parts contain the actual applicative payload."""
708 assert part.id is None
708 assert part.id is None
709 part.id = len(self._parts) # very cheap counter
709 part.id = len(self._parts) # very cheap counter
710 self._parts.append(part)
710 self._parts.append(part)
711
711
712 def newpart(self, typeid, *args, **kwargs):
712 def newpart(self, typeid, *args, **kwargs):
713 """create a new part and add it to the containers
713 """create a new part and add it to the containers
714
714
715 As the part is directly added to the containers. For now, this means
715 As the part is directly added to the containers. For now, this means
716 that any failure to properly initialize the part after calling
716 that any failure to properly initialize the part after calling
717 ``newpart`` should result in a failure of the whole bundling process.
717 ``newpart`` should result in a failure of the whole bundling process.
718
718
719 You can still fall back to manually create and add if you need better
719 You can still fall back to manually create and add if you need better
720 control."""
720 control."""
721 part = bundlepart(typeid, *args, **kwargs)
721 part = bundlepart(typeid, *args, **kwargs)
722 self.addpart(part)
722 self.addpart(part)
723 return part
723 return part
724
724
725 # methods used to generate the bundle2 stream
725 # methods used to generate the bundle2 stream
726 def getchunks(self):
726 def getchunks(self):
727 if self.ui.debugflag:
727 if self.ui.debugflag:
728 msg = [b'bundle2-output-bundle: "%s",' % self._magicstring]
728 msg = [b'bundle2-output-bundle: "%s",' % self._magicstring]
729 if self._params:
729 if self._params:
730 msg.append(b' (%i params)' % len(self._params))
730 msg.append(b' (%i params)' % len(self._params))
731 msg.append(b' %i parts total\n' % len(self._parts))
731 msg.append(b' %i parts total\n' % len(self._parts))
732 self.ui.debug(b''.join(msg))
732 self.ui.debug(b''.join(msg))
733 outdebug(self.ui, b'start emission of %s stream' % self._magicstring)
733 outdebug(self.ui, b'start emission of %s stream' % self._magicstring)
734 yield self._magicstring
734 yield self._magicstring
735 param = self._paramchunk()
735 param = self._paramchunk()
736 outdebug(self.ui, b'bundle parameter: %s' % param)
736 outdebug(self.ui, b'bundle parameter: %s' % param)
737 yield _pack(_fstreamparamsize, len(param))
737 yield _pack(_fstreamparamsize, len(param))
738 if param:
738 if param:
739 yield param
739 yield param
740 for chunk in self._compengine.compressstream(
740 for chunk in self._compengine.compressstream(
741 self._getcorechunk(), self._compopts
741 self._getcorechunk(), self._compopts
742 ):
742 ):
743 yield chunk
743 yield chunk
744
744
745 def _paramchunk(self):
745 def _paramchunk(self):
746 """return a encoded version of all stream parameters"""
746 """return a encoded version of all stream parameters"""
747 blocks = []
747 blocks = []
748 for par, value in self._params:
748 for par, value in self._params:
749 par = urlreq.quote(par)
749 par = urlreq.quote(par)
750 if value is not None:
750 if value is not None:
751 value = urlreq.quote(value)
751 value = urlreq.quote(value)
752 par = b'%s=%s' % (par, value)
752 par = b'%s=%s' % (par, value)
753 blocks.append(par)
753 blocks.append(par)
754 return b' '.join(blocks)
754 return b' '.join(blocks)
755
755
756 def _getcorechunk(self):
756 def _getcorechunk(self):
757 """yield chunk for the core part of the bundle
757 """yield chunk for the core part of the bundle
758
758
759 (all but headers and parameters)"""
759 (all but headers and parameters)"""
760 outdebug(self.ui, b'start of parts')
760 outdebug(self.ui, b'start of parts')
761 for part in self._parts:
761 for part in self._parts:
762 outdebug(self.ui, b'bundle part: "%s"' % part.type)
762 outdebug(self.ui, b'bundle part: "%s"' % part.type)
763 for chunk in part.getchunks(ui=self.ui):
763 for chunk in part.getchunks(ui=self.ui):
764 yield chunk
764 yield chunk
765 outdebug(self.ui, b'end of bundle')
765 outdebug(self.ui, b'end of bundle')
766 yield _pack(_fpartheadersize, 0)
766 yield _pack(_fpartheadersize, 0)
767
767
768 def salvageoutput(self):
768 def salvageoutput(self):
769 """return a list with a copy of all output parts in the bundle
769 """return a list with a copy of all output parts in the bundle
770
770
771 This is meant to be used during error handling to make sure we preserve
771 This is meant to be used during error handling to make sure we preserve
772 server output"""
772 server output"""
773 salvaged = []
773 salvaged = []
774 for part in self._parts:
774 for part in self._parts:
775 if part.type.startswith(b'output'):
775 if part.type.startswith(b'output'):
776 salvaged.append(part.copy())
776 salvaged.append(part.copy())
777 return salvaged
777 return salvaged
778
778
779
779
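# Sketch of emitting a bundle2 container with the class above (assumes a
# ``ui`` object; the filename is illustrative). ``setcompression`` both
# records the stream parameter and installs the matching engine.
bundler = bundle20(ui)
bundler.setcompression(b'GZ')
bundler.newpart(b'output', data=b'hello bundle2', mandatory=False)
with open('example.hg', 'wb') as fp:
    for chunk in bundler.getchunks():
        fp.write(chunk)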
780 class unpackermixin:
780 class unpackermixin:
781 """A mixin to extract bytes and struct data from a stream"""
781 """A mixin to extract bytes and struct data from a stream"""
782
782
783 def __init__(self, fp):
783 def __init__(self, fp):
784 self._fp = fp
784 self._fp = fp
785
785
786 def _unpack(self, format):
786 def _unpack(self, format):
787 """unpack this struct format from the stream
787 """unpack this struct format from the stream
788
788
789 This method is meant for internal usage by the bundle2 protocol only.
789 This method is meant for internal usage by the bundle2 protocol only.
790 It directly manipulates the low level stream, including bundle2 level
790 It directly manipulates the low level stream, including bundle2 level
791 instructions.
791 instructions.
792
792
793 Do not use it to implement higher-level logic or methods."""
793 Do not use it to implement higher-level logic or methods."""
794 data = self._readexact(struct.calcsize(format))
794 data = self._readexact(struct.calcsize(format))
795 return _unpack(format, data)
795 return _unpack(format, data)
796
796
797 def _readexact(self, size):
797 def _readexact(self, size):
798 """read exactly <size> bytes from the stream
798 """read exactly <size> bytes from the stream
799
799
800 This method is meant for internal usage by the bundle2 protocol only.
800 This method is meant for internal usage by the bundle2 protocol only.
801 It directly manipulates the low level stream, including bundle2 level
801 It directly manipulates the low level stream, including bundle2 level
802 instructions.
802 instructions.
803
803
804 Do not use it to implement higher-level logic or methods."""
804 Do not use it to implement higher-level logic or methods."""
805 return changegroup.readexactly(self._fp, size)
805 return changegroup.readexactly(self._fp, size)
806
806
807
807
808 def getunbundler(ui, fp, magicstring=None):
808 def getunbundler(ui, fp, magicstring=None):
809 """return a valid unbundler object for a given magicstring"""
809 """return a valid unbundler object for a given magicstring"""
810 if magicstring is None:
810 if magicstring is None:
811 magicstring = changegroup.readexactly(fp, 4)
811 magicstring = changegroup.readexactly(fp, 4)
812 magic, version = magicstring[0:2], magicstring[2:4]
812 magic, version = magicstring[0:2], magicstring[2:4]
813 if magic != b'HG':
813 if magic != b'HG':
814 ui.debug(
814 ui.debug(
815 b"error: invalid magic: %r (version %r), should be 'HG'\n"
815 b"error: invalid magic: %r (version %r), should be 'HG'\n"
816 % (magic, version)
816 % (magic, version)
817 )
817 )
818 raise error.Abort(_(b'not a Mercurial bundle'))
818 raise error.Abort(_(b'not a Mercurial bundle'))
819 unbundlerclass = formatmap.get(version)
819 unbundlerclass = formatmap.get(version)
820 if unbundlerclass is None:
820 if unbundlerclass is None:
821 raise error.Abort(_(b'unknown bundle version %s') % version)
821 raise error.Abort(_(b'unknown bundle version %s') % version)
822 unbundler = unbundlerclass(ui, fp)
822 unbundler = unbundlerclass(ui, fp)
823 indebug(ui, b'start processing of %s stream' % magicstring)
823 indebug(ui, b'start processing of %s stream' % magicstring)
824 return unbundler
824 return unbundler
825
825
826
826
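# Counterpart sketch: reading the container written above. ``getunbundler``
# consumes the magic string itself, and ``iterparts`` makes sure each part
# is fully consumed before the next one is read. ``part.read()`` (defined
# on unbundlepart later in this module) drains the payload.
with open('example.hg', 'rb') as fp:
    unbundler = getunbundler(ui, fp)
    for part in unbundler.iterparts():
        ui.write(b'%s: %s\n' % (part.type, part.read()))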
827 class unbundle20(unpackermixin):
827 class unbundle20(unpackermixin):
828 """interpret a bundle2 stream
828 """interpret a bundle2 stream
829
829
830 This class is fed with a binary stream and yields parts through its
830 This class is fed with a binary stream and yields parts through its
831 `iterparts` method."""
831 `iterparts` method."""
832
832
833 _magicstring = b'HG20'
833 _magicstring = b'HG20'
834
834
835 def __init__(self, ui, fp):
835 def __init__(self, ui, fp):
836 """If header is specified, we do not read it out of the stream."""
836 """If header is specified, we do not read it out of the stream."""
837 self.ui = ui
837 self.ui = ui
838 self._compengine = util.compengines.forbundletype(b'UN')
838 self._compengine = util.compengines.forbundletype(b'UN')
839 self._compressed = None
839 self._compressed = None
840 super(unbundle20, self).__init__(fp)
840 super(unbundle20, self).__init__(fp)
841
841
842 @util.propertycache
842 @util.propertycache
843 def params(self):
843 def params(self):
844 """dictionary of stream level parameters"""
844 """dictionary of stream level parameters"""
845 indebug(self.ui, b'reading bundle2 stream parameters')
845 indebug(self.ui, b'reading bundle2 stream parameters')
846 params = {}
846 params = {}
847 paramssize = self._unpack(_fstreamparamsize)[0]
847 paramssize = self._unpack(_fstreamparamsize)[0]
848 if paramssize < 0:
848 if paramssize < 0:
849 raise error.BundleValueError(
849 raise error.BundleValueError(
850 b'negative bundle param size: %i' % paramssize
850 b'negative bundle param size: %i' % paramssize
851 )
851 )
852 if paramssize:
852 if paramssize:
853 params = self._readexact(paramssize)
853 params = self._readexact(paramssize)
854 params = self._processallparams(params)
854 params = self._processallparams(params)
855 return params
855 return params
856
856
857 def _processallparams(self, paramsblock):
857 def _processallparams(self, paramsblock):
858 """ """
858 """ """
859 params = util.sortdict()
859 params = util.sortdict()
860 for p in paramsblock.split(b' '):
860 for p in paramsblock.split(b' '):
861 p = p.split(b'=', 1)
861 p = p.split(b'=', 1)
862 p = [urlreq.unquote(i) for i in p]
862 p = [urlreq.unquote(i) for i in p]
863 if len(p) < 2:
863 if len(p) < 2:
864 p.append(None)
864 p.append(None)
865 self._processparam(*p)
865 self._processparam(*p)
866 params[p[0]] = p[1]
866 params[p[0]] = p[1]
867 return params
867 return params
868
868
869 def _processparam(self, name, value):
869 def _processparam(self, name, value):
870 """process a parameter, applying its effect if needed
870 """process a parameter, applying its effect if needed
871
871
872 Parameters starting with a lower case letter are advisory and will be
872 Parameters starting with a lower case letter are advisory and will be
873 ignored when unknown. Those starting with an upper case letter are
873 ignored when unknown. Those starting with an upper case letter are
874 mandatory; this function raises BundleUnknownFeatureError when they are unknown.
874 mandatory; this function raises BundleUnknownFeatureError when they are unknown.
875
875
876 Note: no options are currently supported. Any input will either be
876 Note: no options are currently supported. Any input will either be
877 ignored or will fail.
877 ignored or will fail.
878 """
878 """
879 if not name:
879 if not name:
880 raise ValueError('empty parameter name')
880 raise ValueError('empty parameter name')
881 if name[0:1] not in pycompat.bytestr(
881 if name[0:1] not in pycompat.bytestr(
882 string.ascii_letters # pytype: disable=wrong-arg-types
882 string.ascii_letters # pytype: disable=wrong-arg-types
883 ):
883 ):
884 raise ValueError('non letter first character: %s' % name)
884 raise ValueError('non letter first character: %s' % name)
885 try:
885 try:
886 handler = b2streamparamsmap[name.lower()]
886 handler = b2streamparamsmap[name.lower()]
887 except KeyError:
887 except KeyError:
888 if name[0:1].islower():
888 if name[0:1].islower():
889 indebug(self.ui, b"ignoring unknown parameter %s" % name)
889 indebug(self.ui, b"ignoring unknown parameter %s" % name)
890 else:
890 else:
891 raise error.BundleUnknownFeatureError(params=(name,))
891 raise error.BundleUnknownFeatureError(params=(name,))
892 else:
892 else:
893 handler(self, name, value)
893 handler(self, name, value)
894
894
895 def _forwardchunks(self):
895 def _forwardchunks(self):
896 """utility to transfer a bundle2 as binary
896 """utility to transfer a bundle2 as binary
897
897
898 This is made necessary by the fact that the 'getbundle' command over 'ssh'
898 This is made necessary by the fact that the 'getbundle' command over 'ssh'
899 have no way to know then the reply end, relying on the bundle to be
899 has no way to know when the reply ends, relying on the bundle to be
900 interpreted to know its end. This is terrible and we are sorry, but we
900 interpreted to know its end. This is terrible and we are sorry, but we
901 needed to move forward to get general delta enabled.
901 needed to move forward to get general delta enabled.
902 """
902 """
903 yield self._magicstring
903 yield self._magicstring
904 assert 'params' not in vars(self)
904 assert 'params' not in vars(self)
905 paramssize = self._unpack(_fstreamparamsize)[0]
905 paramssize = self._unpack(_fstreamparamsize)[0]
906 if paramssize < 0:
906 if paramssize < 0:
907 raise error.BundleValueError(
907 raise error.BundleValueError(
908 b'negative bundle param size: %i' % paramssize
908 b'negative bundle param size: %i' % paramssize
909 )
909 )
910 if paramssize:
910 if paramssize:
911 params = self._readexact(paramssize)
911 params = self._readexact(paramssize)
912 self._processallparams(params)
912 self._processallparams(params)
913 # The payload itself is decompressed below, so drop
913 # The payload itself is decompressed below, so drop
914 # the compression parameter passed down to compensate.
914 # the compression parameter passed down to compensate.
915 outparams = []
915 outparams = []
916 for p in params.split(b' '):
916 for p in params.split(b' '):
917 k, v = p.split(b'=', 1)
917 k, v = p.split(b'=', 1)
918 if k.lower() != b'compression':
918 if k.lower() != b'compression':
919 outparams.append(p)
919 outparams.append(p)
920 outparams = b' '.join(outparams)
920 outparams = b' '.join(outparams)
921 yield _pack(_fstreamparamsize, len(outparams))
921 yield _pack(_fstreamparamsize, len(outparams))
922 yield outparams
922 yield outparams
923 else:
923 else:
924 yield _pack(_fstreamparamsize, paramssize)
924 yield _pack(_fstreamparamsize, paramssize)
925 # From there, the payload might need to be decompressed
925 # From there, the payload might need to be decompressed
926 self._fp = self._compengine.decompressorreader(self._fp)
926 self._fp = self._compengine.decompressorreader(self._fp)
927 emptycount = 0
927 emptycount = 0
928 while emptycount < 2:
928 while emptycount < 2:
929 # so we can brainlessly loop
929 # so we can brainlessly loop
930 assert _fpartheadersize == _fpayloadsize
930 assert _fpartheadersize == _fpayloadsize
931 size = self._unpack(_fpartheadersize)[0]
931 size = self._unpack(_fpartheadersize)[0]
932 yield _pack(_fpartheadersize, size)
932 yield _pack(_fpartheadersize, size)
933 if size:
933 if size:
934 emptycount = 0
934 emptycount = 0
935 else:
935 else:
936 emptycount += 1
936 emptycount += 1
937 continue
937 continue
938 if size == flaginterrupt:
938 if size == flaginterrupt:
939 continue
939 continue
940 elif size < 0:
940 elif size < 0:
941 raise error.BundleValueError(b'negative chunk size: %i' % size)
941 raise error.BundleValueError(b'negative chunk size: %i' % size)
942 yield self._readexact(size)
942 yield self._readexact(size)
943
943
944 def iterparts(self, seekable=False):
944 def iterparts(self, seekable=False):
945 """yield all parts contained in the stream"""
945 """yield all parts contained in the stream"""
946 cls = seekableunbundlepart if seekable else unbundlepart
946 cls = seekableunbundlepart if seekable else unbundlepart
947 # make sure params have been loaded
947 # make sure params have been loaded
948 self.params
948 self.params
949 # From there, the payload needs to be decompressed
949 # From there, the payload needs to be decompressed
950 self._fp = self._compengine.decompressorreader(self._fp)
950 self._fp = self._compengine.decompressorreader(self._fp)
951 indebug(self.ui, b'start extraction of bundle2 parts')
951 indebug(self.ui, b'start extraction of bundle2 parts')
952 headerblock = self._readpartheader()
952 headerblock = self._readpartheader()
953 while headerblock is not None:
953 while headerblock is not None:
954 part = cls(self.ui, headerblock, self._fp)
954 part = cls(self.ui, headerblock, self._fp)
955 yield part
955 yield part
956 # Ensure part is fully consumed so we can start reading the next
956 # Ensure part is fully consumed so we can start reading the next
957 # part.
957 # part.
958 part.consume()
958 part.consume()
959
959
960 headerblock = self._readpartheader()
960 headerblock = self._readpartheader()
961 indebug(self.ui, b'end of bundle2 stream')
961 indebug(self.ui, b'end of bundle2 stream')
962
962
963 def _readpartheader(self):
963 def _readpartheader(self):
964 """reads a part header size and return the bytes blob
964 """reads a part header size and return the bytes blob
965
965
966 returns None if empty"""
966 returns None if empty"""
967 headersize = self._unpack(_fpartheadersize)[0]
967 headersize = self._unpack(_fpartheadersize)[0]
968 if headersize < 0:
968 if headersize < 0:
969 raise error.BundleValueError(
969 raise error.BundleValueError(
970 b'negative part header size: %i' % headersize
970 b'negative part header size: %i' % headersize
971 )
971 )
972 indebug(self.ui, b'part header size: %i' % headersize)
972 indebug(self.ui, b'part header size: %i' % headersize)
973 if headersize:
973 if headersize:
974 return self._readexact(headersize)
974 return self._readexact(headersize)
975 return None
975 return None
976
976
977 def compressed(self):
977 def compressed(self):
978 self.params # load params
978 self.params # load params
979 return self._compressed
979 return self._compressed
980
980
981 def close(self):
981 def close(self):
982 """close underlying file"""
982 """close underlying file"""
983 if hasattr(self._fp, 'close'):
983 if hasattr(self._fp, 'close'):
984 return self._fp.close()
984 return self._fp.close()
985
985
986
986
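# Behavior sketch for stream-level parameters of the class above
# (``unbundler`` assumed): unknown lower-case names are advisory and
# ignored, while unknown capitalized names are mandatory and abort.
unbundler._processallparams(b'examplefeature=1')  # advisory: ignored
try:
    unbundler._processallparams(b'Examplefeature=1')
except error.BundleUnknownFeatureError:
    pass  # mandatory parameter we do not support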
987 formatmap = {b'20': unbundle20}
987 formatmap = {b'20': unbundle20}
988
988
989 b2streamparamsmap = {}
989 b2streamparamsmap = {}
990
990
991
991
992 def b2streamparamhandler(name):
992 def b2streamparamhandler(name):
993 """register a handler for a stream level parameter"""
993 """register a handler for a stream level parameter"""
994
994
995 def decorator(func):
995 def decorator(func):
996 assert name not in b2streamparamsmap
996 assert name not in b2streamparamsmap
997 b2streamparamsmap[name] = func
997 b2streamparamsmap[name] = func
998 return func
998 return func
999
999
1000 return decorator
1000 return decorator
1001
1001
1002
1002
1003 @b2streamparamhandler(b'compression')
1003 @b2streamparamhandler(b'compression')
1004 def processcompression(unbundler, param, value):
1004 def processcompression(unbundler, param, value):
1005 """read compression parameter and install payload decompression"""
1005 """read compression parameter and install payload decompression"""
1006 if value not in util.compengines.supportedbundletypes:
1006 if value not in util.compengines.supportedbundletypes:
1007 raise error.BundleUnknownFeatureError(params=(param,), values=(value,))
1007 raise error.BundleUnknownFeatureError(params=(param,), values=(value,))
1008 unbundler._compengine = util.compengines.forbundletype(value)
1008 unbundler._compengine = util.compengines.forbundletype(value)
1009 if value is not None:
1009 if value is not None:
1010 unbundler._compressed = True
1010 unbundler._compressed = True
1011
1011
1012
1012
1013 class bundlepart:
1013 class bundlepart:
1014 """A bundle2 part contains application level payload
1014 """A bundle2 part contains application level payload
1015
1015
1016 The part `type` is used to route the part to the application level
1016 The part `type` is used to route the part to the application level
1017 handler.
1017 handler.
1018
1018
1019 The part payload is contained in ``part.data``. It could be raw bytes or a
1019 The part payload is contained in ``part.data``. It could be raw bytes or a
1020 generator of byte chunks.
1020 generator of byte chunks.
1021
1021
1022 You can add parameters to the part using the ``addparam`` method.
1022 You can add parameters to the part using the ``addparam`` method.
1023 Parameters can be either mandatory (default) or advisory. Remote side
1023 Parameters can be either mandatory (default) or advisory. Remote side
1024 should be able to safely ignore the advisory ones.
1024 should be able to safely ignore the advisory ones.
1025
1025
1026 Neither data nor parameters can be modified after generation has begun.
1026 Neither data nor parameters can be modified after generation has begun.
1027 """
1027 """
1028
1028
1029 def __init__(
1029 def __init__(
1030 self,
1030 self,
1031 parttype,
1031 parttype,
1032 mandatoryparams=(),
1032 mandatoryparams=(),
1033 advisoryparams=(),
1033 advisoryparams=(),
1034 data=b'',
1034 data=b'',
1035 mandatory=True,
1035 mandatory=True,
1036 ):
1036 ):
1037 validateparttype(parttype)
1037 validateparttype(parttype)
1038 self.id = None
1038 self.id = None
1039 self.type = parttype
1039 self.type = parttype
1040 self._data = data
1040 self._data = data
1041 self._mandatoryparams = list(mandatoryparams)
1041 self._mandatoryparams = list(mandatoryparams)
1042 self._advisoryparams = list(advisoryparams)
1042 self._advisoryparams = list(advisoryparams)
1043 # checking for duplicated entries
1043 # checking for duplicated entries
1044 self._seenparams = set()
1044 self._seenparams = set()
1045 for pname, __ in self._mandatoryparams + self._advisoryparams:
1045 for pname, __ in self._mandatoryparams + self._advisoryparams:
1046 if pname in self._seenparams:
1046 if pname in self._seenparams:
1047 raise error.ProgrammingError(b'duplicated params: %s' % pname)
1047 raise error.ProgrammingError(b'duplicated params: %s' % pname)
1048 self._seenparams.add(pname)
1048 self._seenparams.add(pname)
1049 # status of the part's generation:
1049 # status of the part's generation:
1050 # - None: not started,
1050 # - None: not started,
1051 # - False: currently generated,
1051 # - False: currently generated,
1052 # - True: generation done.
1052 # - True: generation done.
1053 self._generated = None
1053 self._generated = None
1054 self.mandatory = mandatory
1054 self.mandatory = mandatory
1055
1055
1056 def __repr__(self):
1056 def __repr__(self):
1057 cls = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
1057 cls = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
1058 return '<%s object at %x; id: %s; type: %s; mandatory: %s>' % (
1058 return '<%s object at %x; id: %s; type: %s; mandatory: %s>' % (
1059 cls,
1059 cls,
1060 id(self),
1060 id(self),
1061 self.id,
1061 self.id,
1062 self.type,
1062 self.type,
1063 self.mandatory,
1063 self.mandatory,
1064 )
1064 )
1065
1065
1066 def copy(self):
1066 def copy(self):
1067 """return a copy of the part
1067 """return a copy of the part
1068
1068
1069 The new part has the very same content but no partid assigned yet.
1069 The new part has the very same content but no partid assigned yet.
1070 Parts with generated data cannot be copied."""
1070 Parts with generated data cannot be copied."""
1071 assert not hasattr(self.data, 'next')
1071 assert not hasattr(self.data, 'next')
1072 return self.__class__(
1072 return self.__class__(
1073 self.type,
1073 self.type,
1074 self._mandatoryparams,
1074 self._mandatoryparams,
1075 self._advisoryparams,
1075 self._advisoryparams,
1076 self._data,
1076 self._data,
1077 self.mandatory,
1077 self.mandatory,
1078 )
1078 )
1079
1079
1080 # methods used to define the part content
1080 # methods used to define the part content
1081 @property
1081 @property
1082 def data(self):
1082 def data(self):
1083 return self._data
1083 return self._data
1084
1084
1085 @data.setter
1085 @data.setter
1086 def data(self, data):
1086 def data(self, data):
1087 if self._generated is not None:
1087 if self._generated is not None:
1088 raise error.ReadOnlyPartError(b'part is being generated')
1088 raise error.ReadOnlyPartError(b'part is being generated')
1089 self._data = data
1089 self._data = data
1090
1090
1091 @property
1091 @property
1092 def mandatoryparams(self):
1092 def mandatoryparams(self):
1093 # make it an immutable tuple to force people through ``addparam``
1093 # make it an immutable tuple to force people through ``addparam``
1094 return tuple(self._mandatoryparams)
1094 return tuple(self._mandatoryparams)
1095
1095
1096 @property
1096 @property
1097 def advisoryparams(self):
1097 def advisoryparams(self):
1098 # make it an immutable tuple to force people through ``addparam``
1098 # make it an immutable tuple to force people through ``addparam``
1099 return tuple(self._advisoryparams)
1099 return tuple(self._advisoryparams)
1100
1100
1101 def addparam(self, name, value=b'', mandatory=True):
1101 def addparam(self, name, value=b'', mandatory=True):
1102 """add a parameter to the part
1102 """add a parameter to the part
1103
1103
1104 If 'mandatory' is set to True, the remote handler must claim support
1104 If 'mandatory' is set to True, the remote handler must claim support
1105 for this parameter or the unbundling will be aborted.
1105 for this parameter or the unbundling will be aborted.
1106
1106
1107 The 'name' and 'value' cannot exceed 255 bytes each.
1107 The 'name' and 'value' cannot exceed 255 bytes each.
1108 """
1108 """
1109 if self._generated is not None:
1109 if self._generated is not None:
1110 raise error.ReadOnlyPartError(b'part is being generated')
1110 raise error.ReadOnlyPartError(b'part is being generated')
1111 if name in self._seenparams:
1111 if name in self._seenparams:
1112 raise ValueError(b'duplicated params: %s' % name)
1112 raise ValueError(b'duplicated params: %s' % name)
1113 self._seenparams.add(name)
1113 self._seenparams.add(name)
1114 params = self._advisoryparams
1114 params = self._advisoryparams
1115 if mandatory:
1115 if mandatory:
1116 params = self._mandatoryparams
1116 params = self._mandatoryparams
1117 params.append((name, value))
1117 params.append((name, value))
1118
1118
1119 # methods used to generate the bundle2 stream
1119 # methods used to generate the bundle2 stream
1120 def getchunks(self, ui):
1120 def getchunks(self, ui):
1121 if self._generated is not None:
1121 if self._generated is not None:
1122 raise error.ProgrammingError(b'part can only be consumed once')
1122 raise error.ProgrammingError(b'part can only be consumed once')
1123 self._generated = False
1123 self._generated = False
1124
1124
1125 if ui.debugflag:
1125 if ui.debugflag:
1126 msg = [b'bundle2-output-part: "%s"' % self.type]
1126 msg = [b'bundle2-output-part: "%s"' % self.type]
1127 if not self.mandatory:
1127 if not self.mandatory:
1128 msg.append(b' (advisory)')
1128 msg.append(b' (advisory)')
1129 nbmp = len(self.mandatoryparams)
1129 nbmp = len(self.mandatoryparams)
1130 nbap = len(self.advisoryparams)
1130 nbap = len(self.advisoryparams)
1131 if nbmp or nbap:
1131 if nbmp or nbap:
1132 msg.append(b' (params:')
1132 msg.append(b' (params:')
1133 if nbmp:
1133 if nbmp:
1134 msg.append(b' %i mandatory' % nbmp)
1134 msg.append(b' %i mandatory' % nbmp)
1135 if nbap:
1135 if nbap:
1136 msg.append(b' %i advisory' % nbap)
1136 msg.append(b' %i advisory' % nbap)
1137 msg.append(b')')
1137 msg.append(b')')
1138 if not self.data:
1138 if not self.data:
1139 msg.append(b' empty payload')
1139 msg.append(b' empty payload')
1140 elif hasattr(self.data, 'next') or hasattr(self.data, '__next__'):
1140 elif hasattr(self.data, 'next') or hasattr(self.data, '__next__'):
1141 msg.append(b' streamed payload')
1141 msg.append(b' streamed payload')
1142 else:
1142 else:
1143 msg.append(b' %i bytes payload' % len(self.data))
1143 msg.append(b' %i bytes payload' % len(self.data))
1144 msg.append(b'\n')
1144 msg.append(b'\n')
1145 ui.debug(b''.join(msg))
1145 ui.debug(b''.join(msg))
1146
1146
1147 #### header
1147 #### header
1148 if self.mandatory:
1148 if self.mandatory:
1149 parttype = self.type.upper()
1149 parttype = self.type.upper()
1150 else:
1150 else:
1151 parttype = self.type.lower()
1151 parttype = self.type.lower()
1152 outdebug(ui, b'part %s: "%s"' % (pycompat.bytestr(self.id), parttype))
1152 outdebug(ui, b'part %s: "%s"' % (pycompat.bytestr(self.id), parttype))
1153 ## parttype
1153 ## parttype
1154 header = [
1154 header = [
1155 _pack(_fparttypesize, len(parttype)),
1155 _pack(_fparttypesize, len(parttype)),
1156 parttype,
1156 parttype,
1157 _pack(_fpartid, self.id),
1157 _pack(_fpartid, self.id),
1158 ]
1158 ]
1159 ## parameters
1159 ## parameters
1160 # count
1160 # count
1161 manpar = self.mandatoryparams
1161 manpar = self.mandatoryparams
1162 advpar = self.advisoryparams
1162 advpar = self.advisoryparams
1163 header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
1163 header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
1164 # size
1164 # size
1165 parsizes = []
1165 parsizes = []
1166 for key, value in manpar:
1166 for key, value in manpar:
1167 parsizes.append(len(key))
1167 parsizes.append(len(key))
1168 parsizes.append(len(value))
1168 parsizes.append(len(value))
1169 for key, value in advpar:
1169 for key, value in advpar:
1170 parsizes.append(len(key))
1170 parsizes.append(len(key))
1171 parsizes.append(len(value))
1171 parsizes.append(len(value))
1172 paramsizes = _pack(_makefpartparamsizes(len(parsizes) // 2), *parsizes)
1172 paramsizes = _pack(_makefpartparamsizes(len(parsizes) // 2), *parsizes)
1173 header.append(paramsizes)
1173 header.append(paramsizes)
1174 # key, value
1174 # key, value
1175 for key, value in manpar:
1175 for key, value in manpar:
1176 header.append(key)
1176 header.append(key)
1177 header.append(value)
1177 header.append(value)
1178 for key, value in advpar:
1178 for key, value in advpar:
1179 header.append(key)
1179 header.append(key)
1180 header.append(value)
1180 header.append(value)
1181 ## finalize header
1181 ## finalize header
1182 try:
1182 try:
1183 headerchunk = b''.join(header)
1183 headerchunk = b''.join(header)
1184 except TypeError:
1184 except TypeError:
1185 raise TypeError(
1185 raise TypeError(
1186 'Found a non-bytes trying to '
1186 'Found a non-bytes trying to '
1187 'build bundle part header: %r' % header
1187 'build bundle part header: %r' % header
1188 )
1188 )
1189 outdebug(ui, b'header chunk size: %i' % len(headerchunk))
1189 outdebug(ui, b'header chunk size: %i' % len(headerchunk))
1190 yield _pack(_fpartheadersize, len(headerchunk))
1190 yield _pack(_fpartheadersize, len(headerchunk))
1191 yield headerchunk
1191 yield headerchunk
1192 ## payload
1192 ## payload
1193 try:
1193 try:
1194 for chunk in self._payloadchunks():
1194 for chunk in self._payloadchunks():
1195 outdebug(ui, b'payload chunk size: %i' % len(chunk))
1195 outdebug(ui, b'payload chunk size: %i' % len(chunk))
1196 yield _pack(_fpayloadsize, len(chunk))
1196 yield _pack(_fpayloadsize, len(chunk))
1197 yield chunk
1197 yield chunk
1198 except GeneratorExit:
1198 except GeneratorExit:
1199 # GeneratorExit means that nobody is listening for our
1199 # GeneratorExit means that nobody is listening for our
1200 # results anyway, so just bail quickly rather than trying
1200 # results anyway, so just bail quickly rather than trying
1201 # to produce an error part.
1201 # to produce an error part.
1202 ui.debug(b'bundle2-generatorexit\n')
1202 ui.debug(b'bundle2-generatorexit\n')
1203 raise
1203 raise
1204 except BaseException as exc:
1204 except BaseException as exc:
1205 bexc = stringutil.forcebytestr(exc)
1205 bexc = stringutil.forcebytestr(exc)
1206 # backup exception data for later
1206 # backup exception data for later
1207 ui.debug(
1207 ui.debug(
1208 b'bundle2-input-stream-interrupt: encoding exception %s' % bexc
1208 b'bundle2-input-stream-interrupt: encoding exception %s' % bexc
1209 )
1209 )
1210 tb = sys.exc_info()[2]
1210 tb = sys.exc_info()[2]
1211 msg = b'unexpected error: %s' % bexc
1211 msg = b'unexpected error: %s' % bexc
1212 interpart = bundlepart(
1212 interpart = bundlepart(
1213 b'error:abort', [(b'message', msg)], mandatory=False
1213 b'error:abort', [(b'message', msg)], mandatory=False
1214 )
1214 )
1215 interpart.id = 0
1215 interpart.id = 0
1216 yield _pack(_fpayloadsize, -1)
1216 yield _pack(_fpayloadsize, -1)
1217 for chunk in interpart.getchunks(ui=ui):
1217 for chunk in interpart.getchunks(ui=ui):
1218 yield chunk
1218 yield chunk
1219 outdebug(ui, b'closing payload chunk')
1219 outdebug(ui, b'closing payload chunk')
1220 # abort current part payload
1220 # abort current part payload
1221 yield _pack(_fpayloadsize, 0)
1221 yield _pack(_fpayloadsize, 0)
1222 pycompat.raisewithtb(exc, tb)
1222 pycompat.raisewithtb(exc, tb)
1223 # end of payload
1223 # end of payload
1224 outdebug(ui, b'closing payload chunk')
1224 outdebug(ui, b'closing payload chunk')
1225 yield _pack(_fpayloadsize, 0)
1225 yield _pack(_fpayloadsize, 0)
1226 self._generated = True
1226 self._generated = True
1227
1227
1228 def _payloadchunks(self):
1228 def _payloadchunks(self):
1229 """yield chunks of a the part payload
1229 """yield chunks of a the part payload
1230
1230
1231 Exists to handle the different methods to provide data to a part."""
1231 Exists to handle the different methods to provide data to a part."""
1232 # we only support fixed size data now.
1232 # we only support fixed size data now.
1233 # This will be improved in the future.
1233 # This will be improved in the future.
1234 if hasattr(self.data, 'next') or hasattr(self.data, '__next__'):
1234 if hasattr(self.data, 'next') or hasattr(self.data, '__next__'):
1235 buff = util.chunkbuffer(self.data)
1235 buff = util.chunkbuffer(self.data)
1236 chunk = buff.read(preferedchunksize)
1236 chunk = buff.read(preferedchunksize)
1237 while chunk:
1237 while chunk:
1238 yield chunk
1238 yield chunk
1239 chunk = buff.read(preferedchunksize)
1239 chunk = buff.read(preferedchunksize)
1240 elif len(self.data):
1240 elif len(self.data):
1241 yield self.data
1241 yield self.data
1242
1242
1243
1243
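# Small construction sketch for the class above (names illustrative).
# Parameters freeze once generation starts, and ``copy`` is only legal
# while the payload is static bytes rather than a generator.
part = bundlepart(b'output', data=b'text', mandatory=False)
part.addparam(b'in-reply-to', b'0', mandatory=False)
clone = part.copy()  # same content, no part id assigned yet
assert clone.advisoryparams == ((b'in-reply-to', b'0'),)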
1244 flaginterrupt = -1
1244 flaginterrupt = -1
1245
1245
1246
1246
1247 class interrupthandler(unpackermixin):
1247 class interrupthandler(unpackermixin):
1248 """read one part and process it with restricted capability
1248 """read one part and process it with restricted capability
1249
1249
1250 This allows transmitting an exception raised on the producer side during part
1250 This allows transmitting an exception raised on the producer side during part
1251 iteration while the consumer is reading a part.
1251 iteration while the consumer is reading a part.
1252
1252
1253 Part processed in this manner only have access to a ui object,"""
1253 Part processed in this manner only have access to a ui object,"""
1254
1254
1255 def __init__(self, ui, fp):
1255 def __init__(self, ui, fp):
1256 super(interrupthandler, self).__init__(fp)
1256 super(interrupthandler, self).__init__(fp)
1257 self.ui = ui
1257 self.ui = ui
1258
1258
1259 def _readpartheader(self):
1259 def _readpartheader(self):
1260 """reads a part header size and return the bytes blob
1260 """reads a part header size and return the bytes blob
1261
1261
1262 returns None if empty"""
1262 returns None if empty"""
1263 headersize = self._unpack(_fpartheadersize)[0]
1263 headersize = self._unpack(_fpartheadersize)[0]
1264 if headersize < 0:
1264 if headersize < 0:
1265 raise error.BundleValueError(
1265 raise error.BundleValueError(
1266 b'negative part header size: %i' % headersize
1266 b'negative part header size: %i' % headersize
1267 )
1267 )
1268 indebug(self.ui, b'part header size: %i\n' % headersize)
1268 indebug(self.ui, b'part header size: %i\n' % headersize)
1269 if headersize:
1269 if headersize:
1270 return self._readexact(headersize)
1270 return self._readexact(headersize)
1271 return None
1271 return None
1272
1272
1273 def __call__(self):
1273 def __call__(self):
1274
1274
1275 self.ui.debug(
1275 self.ui.debug(
1276 b'bundle2-input-stream-interrupt: opening out of band context\n'
1276 b'bundle2-input-stream-interrupt: opening out of band context\n'
1277 )
1277 )
1278 indebug(self.ui, b'bundle2 stream interruption, looking for a part.')
1278 indebug(self.ui, b'bundle2 stream interruption, looking for a part.')
1279 headerblock = self._readpartheader()
1279 headerblock = self._readpartheader()
1280 if headerblock is None:
1280 if headerblock is None:
1281 indebug(self.ui, b'no part found during interruption.')
1281 indebug(self.ui, b'no part found during interruption.')
1282 return
1282 return
1283 part = unbundlepart(self.ui, headerblock, self._fp)
1283 part = unbundlepart(self.ui, headerblock, self._fp)
1284 op = interruptoperation(self.ui)
1284 op = interruptoperation(self.ui)
1285 hardabort = False
1285 hardabort = False
1286 try:
1286 try:
1287 _processpart(op, part)
1287 _processpart(op, part)
1288 except (SystemExit, KeyboardInterrupt):
1288 except (SystemExit, KeyboardInterrupt):
1289 hardabort = True
1289 hardabort = True
1290 raise
1290 raise
1291 finally:
1291 finally:
1292 if not hardabort:
1292 if not hardabort:
1293 part.consume()
1293 part.consume()
1294 self.ui.debug(
1294 self.ui.debug(
1295 b'bundle2-input-stream-interrupt: closing out of band context\n'
1295 b'bundle2-input-stream-interrupt: closing out of band context\n'
1296 )
1296 )
1297
1297
1298
1298
class interruptoperation:
    """A limited operation to be used by part handlers during interruption

    It only has access to a ui object.
    """

    def __init__(self, ui):
        self.ui = ui
        self.reply = None
        self.captureoutput = False

    @property
    def repo(self):
        raise error.ProgrammingError(b'no repo access from stream interruption')

    def gettransaction(self):
        raise TransactionUnavailable(b'no repo access from stream interruption')


def decodepayloadchunks(ui, fh):
    """Reads bundle2 part payload data into chunks.

    Part payload data consists of framed chunks. This function takes
    a file handle and emits those chunks.
    """
    dolog = ui.configbool(b'devel', b'bundle2.debug')
    debug = ui.debug

    headerstruct = struct.Struct(_fpayloadsize)
    headersize = headerstruct.size
    unpack = headerstruct.unpack

    readexactly = changegroup.readexactly
    read = fh.read

    chunksize = unpack(readexactly(fh, headersize))[0]
    indebug(ui, b'payload chunk size: %i' % chunksize)

    # changegroup.readexactly() is inlined below for performance.
    while chunksize:
        if chunksize >= 0:
            s = read(chunksize)
            if len(s) < chunksize:
                raise error.Abort(
                    _(
                        b'stream ended unexpectedly '
                        b'(got %d bytes, expected %d)'
                    )
                    % (len(s), chunksize)
                )

            yield s
        elif chunksize == flaginterrupt:
            # Interrupt "signal" detected. The regular stream is interrupted
            # and a bundle2 part follows. Consume it.
            interrupthandler(ui, fh)()
        else:
            raise error.BundleValueError(
                b'negative payload chunk size: %s' % chunksize
            )

        s = read(headersize)
        if len(s) < headersize:
            raise error.Abort(
                _(b'stream ended unexpectedly (got %d bytes, expected %d)')
                % (len(s), headersize)
            )

        chunksize = unpack(s)[0]

        # indebug() inlined for performance.
        if dolog:
            debug(b'bundle2-input: payload chunk size: %i\n' % chunksize)


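# Illustrative sketch (not part of the module): the frames consumed above are
# plain big-endian int32 length prefixes (assuming _fpayloadsize is such a
# struct format, as used throughout this file). A two-chunk payload could be
# built and decoded like:
#
#     import io
#     import struct
#
#     frames = b''.join(
#         struct.pack('>i', len(c)) + c for c in (b'abc', b'defg')
#     ) + struct.pack('>i', 0)  # a zero-length frame ends the payload
#
#     # list(decodepayloadchunks(ui, io.BytesIO(frames))) == [b'abc', b'defg']
#
# A frame size of -1 (flaginterrupt) instead hands control to interrupthandler.
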
class unbundlepart(unpackermixin):
    """a bundle part read from a bundle"""

    def __init__(self, ui, header, fp):
        super(unbundlepart, self).__init__(fp)
        self._seekable = hasattr(fp, 'seek') and hasattr(fp, 'tell')
        self.ui = ui
        # unbundle state attr
        self._headerdata = header
        self._headeroffset = 0
        self._initialized = False
        self.consumed = False
        # part data
        self.id = None
        self.type = None
        self.mandatoryparams = None
        self.advisoryparams = None
        self.params = None
        self.mandatorykeys = ()
        self._readheader()
        self._mandatory = None
        self._pos = 0

    def _fromheader(self, size):
        """return the next <size> bytes from the header"""
        offset = self._headeroffset
        data = self._headerdata[offset : (offset + size)]
        self._headeroffset = offset + size
        return data

    def _unpackheader(self, format):
        """read given format from header

        This automatically computes the size of the format to read."""
        data = self._fromheader(struct.calcsize(format))
        return _unpack(format, data)

    def _initparams(self, mandatoryparams, advisoryparams):
        """internal function to set up all logic related to parameters"""
        # make it read only to prevent people touching it by mistake.
        self.mandatoryparams = tuple(mandatoryparams)
        self.advisoryparams = tuple(advisoryparams)
        # user friendly UI
        self.params = util.sortdict(self.mandatoryparams)
        self.params.update(self.advisoryparams)
        self.mandatorykeys = frozenset(p[0] for p in mandatoryparams)

    def _readheader(self):
        """read the header and setup the object"""
        typesize = self._unpackheader(_fparttypesize)[0]
        self.type = self._fromheader(typesize)
        indebug(self.ui, b'part type: "%s"' % self.type)
        self.id = self._unpackheader(_fpartid)[0]
        indebug(self.ui, b'part id: "%s"' % pycompat.bytestr(self.id))
        # extract mandatory bit from type
        self.mandatory = self.type != self.type.lower()
        self.type = self.type.lower()
        ## reading parameters
        # param count
        mancount, advcount = self._unpackheader(_fpartparamcount)
        indebug(self.ui, b'part parameters: %i' % (mancount + advcount))
        # param size
        fparamsizes = _makefpartparamsizes(mancount + advcount)
        paramsizes = self._unpackheader(fparamsizes)
        # make it a list of (key size, value size) pairs again
        paramsizes = list(zip(paramsizes[::2], paramsizes[1::2]))
        # split mandatory from advisory
        mansizes = paramsizes[:mancount]
        advsizes = paramsizes[mancount:]
        # retrieve param value
        manparams = []
        for key, value in mansizes:
            manparams.append((self._fromheader(key), self._fromheader(value)))
        advparams = []
        for key, value in advsizes:
            advparams.append((self._fromheader(key), self._fromheader(value)))
        self._initparams(manparams, advparams)
        ## part payload
        self._payloadstream = util.chunkbuffer(self._payloadchunks())
        # we read the data, tell it
        self._initialized = True

    def _payloadchunks(self):
        """Generator of decoded chunks in the payload."""
        return decodepayloadchunks(self.ui, self._fp)

    def consume(self):
        """Read the part payload until completion.

        By consuming the part data, the underlying stream read offset will
        be advanced to the next part (or end of stream).
        """
        if self.consumed:
            return

        chunk = self.read(32768)
        while chunk:
            self._pos += len(chunk)
            chunk = self.read(32768)

    def read(self, size=None):
        """read payload data"""
        if not self._initialized:
            self._readheader()
        if size is None:
            data = self._payloadstream.read()
        else:
            data = self._payloadstream.read(size)
        self._pos += len(data)
        if size is None or len(data) < size:
            if not self.consumed and self._pos:
                self.ui.debug(
                    b'bundle2-input-part: total payload size %i\n' % self._pos
                )
            self.consumed = True
        return data


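# Illustrative sketch (not part of the module): a minimal header blob as parsed
# by _readheader() above, assuming this module's usual format strings
# (_fparttypesize == '>B', _fpartid == '>I', _fpartparamcount == '>BB'):
#
#     import struct
#
#     ptype = b'output'
#     header = (
#         struct.pack('>B', len(ptype)) + ptype  # type size, then type name
#         + struct.pack('>I', 42)                # part id
#         + struct.pack('>BB', 0, 0)             # zero mandatory/advisory params
#     )
#
# unbundlepart(ui, header, fp) would expose an advisory (all-lowercase) part of
# type b'output' with id 42, reading its payload frames from `fp`.
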
class seekableunbundlepart(unbundlepart):
    """A bundle2 part in a bundle that is seekable.

    Regular ``unbundlepart`` instances can only be read once. This class
    extends ``unbundlepart`` to enable bi-directional seeking within the
    part.

    Bundle2 part data consists of framed chunks. Offsets when seeking
    refer to the decoded data, not the offsets in the underlying bundle2
    stream.

    To facilitate quickly seeking within the decoded data, instances of this
    class maintain a mapping between offsets in the underlying stream and
    the decoded payload. This mapping will consume memory in proportion
    to the number of chunks within the payload (which almost certainly
    increases in proportion with the size of the part).
    """

    def __init__(self, ui, header, fp):
        # (payload, file) offsets for chunk starts.
        self._chunkindex = []

        super(seekableunbundlepart, self).__init__(ui, header, fp)

    def _payloadchunks(self, chunknum=0):
        '''seek to specified chunk and start yielding data'''
        if len(self._chunkindex) == 0:
            assert chunknum == 0, b'Must start with chunk 0'
            self._chunkindex.append((0, self._tellfp()))
        else:
            assert chunknum < len(self._chunkindex), (
                b'Unknown chunk %d' % chunknum
            )
            self._seekfp(self._chunkindex[chunknum][1])

        pos = self._chunkindex[chunknum][0]

        for chunk in decodepayloadchunks(self.ui, self._fp):
            chunknum += 1
            pos += len(chunk)
            if chunknum == len(self._chunkindex):
                self._chunkindex.append((pos, self._tellfp()))

            yield chunk

    def _findchunk(self, pos):
        '''for a given payload position, return a chunk number and offset'''
        for chunk, (ppos, fpos) in enumerate(self._chunkindex):
            if ppos == pos:
                return chunk, 0
            elif ppos > pos:
                return chunk - 1, pos - self._chunkindex[chunk - 1][0]
        raise ValueError(b'Unknown chunk')

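    # Illustrative sketch (not part of the module): with a chunk index of
    # [(0, f0), (3, f1), (7, f2)] (payload offset, file offset pairs),
    # _findchunk(3) returns (1, 0) -- an exact chunk boundary -- while
    # _findchunk(5) returns (1, 2): seek to chunk 1, then discard 2 bytes.
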
    def tell(self):
        return self._pos

    def seek(self, offset, whence=os.SEEK_SET):
        if whence == os.SEEK_SET:
            newpos = offset
        elif whence == os.SEEK_CUR:
            newpos = self._pos + offset
        elif whence == os.SEEK_END:
            if not self.consumed:
                # Can't use self.consume() here because it advances self._pos.
                chunk = self.read(32768)
                while chunk:
                    chunk = self.read(32768)
            newpos = self._chunkindex[-1][0] - offset
        else:
            raise ValueError(b'Unknown whence value: %r' % (whence,))

        if newpos > self._chunkindex[-1][0] and not self.consumed:
            # Can't use self.consume() here because it advances self._pos.
            chunk = self.read(32768)
            while chunk:
                chunk = self.read(32768)

        if not 0 <= newpos <= self._chunkindex[-1][0]:
            raise ValueError(b'Offset out of range')

        if self._pos != newpos:
            chunk, internaloffset = self._findchunk(newpos)
            self._payloadstream = util.chunkbuffer(self._payloadchunks(chunk))
            adjust = self.read(internaloffset)
            if len(adjust) != internaloffset:
                raise error.Abort(_(b'Seek failed\n'))
            self._pos = newpos

    def _seekfp(self, offset, whence=0):
        """move the underlying file pointer

        This method is meant for internal usage by the bundle2 protocol only.
        It directly manipulates the low-level stream, including bundle2-level
        instructions.

        Do not use it to implement higher-level logic or methods."""
        if self._seekable:
            return self._fp.seek(offset, whence)
        else:
            raise NotImplementedError(_(b'File pointer is not seekable'))

    def _tellfp(self):
        """return the file offset, or None if file is not seekable

        This method is meant for internal usage by the bundle2 protocol only.
        It directly manipulates the low-level stream, including bundle2-level
        instructions.

        Do not use it to implement higher-level logic or methods."""
        if self._seekable:
            try:
                return self._fp.tell()
            except IOError as e:
                if e.errno == errno.ESPIPE:
                    self._seekable = False
                else:
                    raise
            return None


# These are only the static capabilities.
# Check the 'getrepocaps' function for the rest.
capabilities = {
    b'HG20': (),
    b'bookmarks': (),
    b'error': (b'abort', b'unsupportedcontent', b'pushraced', b'pushkey'),
    b'listkeys': (),
    b'pushkey': (),
    b'digests': tuple(sorted(util.DIGESTS.keys())),
    b'remote-changegroup': (b'http', b'https'),
    b'hgtagsfnodes': (),
    b'phases': (b'heads',),
    b'stream': (b'v2',),
}


def getrepocaps(repo, allowpushback=False, role=None):
    """return the bundle2 capabilities for a given repo

    Exists to allow extensions (like evolution) to mutate the capabilities.

    The returned value is used for servers advertising their capabilities as
    well as clients advertising their capabilities to servers as part of
    bundle2 requests. The ``role`` argument specifies which is which.
    """
    if role not in (b'client', b'server'):
        raise error.ProgrammingError(b'role argument must be client or server')

    caps = capabilities.copy()
    caps[b'changegroup'] = tuple(
        sorted(changegroup.supportedincomingversions(repo))
    )
    if obsolete.isenabled(repo, obsolete.exchangeopt):
        supportedformat = tuple(b'V%i' % v for v in obsolete.formats)
        caps[b'obsmarkers'] = supportedformat
    if allowpushback:
        caps[b'pushback'] = ()
    cpmode = repo.ui.config(b'server', b'concurrent-push-mode')
    if cpmode == b'check-related':
        caps[b'checkheads'] = (b'related',)
    if b'phases' in repo.ui.configlist(b'devel', b'legacy.exchange'):
        caps.pop(b'phases')

    # Don't advertise stream clone support in server mode if not configured.
    if role == b'server':
        streamsupported = repo.ui.configbool(
            b'server', b'uncompressed', untrusted=True
        )
        featuresupported = repo.ui.configbool(b'server', b'bundle2.stream')

        if not streamsupported or not featuresupported:
            caps.pop(b'stream')
    # Else always advertise support on client, because payload support
    # should always be advertised.

    if repo.ui.configbool(b'experimental', b'stream-v3'):
        if b'stream' in caps:
            caps[b'stream'] += (b'v3-exp',)

    # b'rev-branch-cache' is no longer advertised, but still supported
    # for legacy clients.

    return caps


def bundle2caps(remote):
    """return the bundle capabilities of a peer as dict"""
    raw = remote.capable(b'bundle2')
    if not raw and raw != b'':
        return {}
    capsblob = urlreq.unquote(remote.capable(b'bundle2'))
    return decodecaps(capsblob)


def obsmarkersversion(caps):
    """extract the list of supported obsmarkers versions from a bundle2caps dict"""
    obscaps = caps.get(b'obsmarkers', ())
    return [int(c[1:]) for c in obscaps if c.startswith(b'V')]


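# Illustrative sketch (not part of the module): obsmarkersversion() just strips
# the b'V' prefix from each advertised format, e.g.:
#
#     obsmarkersversion({b'obsmarkers': (b'V0', b'V1')})  # -> [0, 1]
#     obsmarkersversion({})                               # -> []
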
def writenewbundle(
    ui,
    repo,
    source,
    filename,
    bundletype,
    outgoing,
    opts,
    vfs=None,
    compression=None,
    compopts=None,
    allow_internal=False,
):
    if bundletype.startswith(b'HG10'):
        cg = changegroup.makechangegroup(repo, outgoing, b'01', source)
        return writebundle(
            ui,
            cg,
            filename,
            bundletype,
            vfs=vfs,
            compression=compression,
            compopts=compopts,
        )
    elif not bundletype.startswith(b'HG20'):
        raise error.ProgrammingError(b'unknown bundle type: %s' % bundletype)

    # enforce that no internal phases are to be bundled
    bundled_internal = repo.revs(b"%ln and _internal()", outgoing.ancestorsof)
    if bundled_internal and not allow_internal:
        count = len(repo.revs(b'%ln and _internal()', outgoing.missing))
        msg = "backup bundle would contain %d internal changesets"
        msg %= count
        raise error.ProgrammingError(msg)

    caps = {}
    if opts.get(b'obsolescence', False):
        caps[b'obsmarkers'] = (b'V1',)
    if opts.get(b'streamv2'):
        caps[b'stream'] = [b'v2']
    elif opts.get(b'streamv3-exp'):
        caps[b'stream'] = [b'v3-exp']
    bundle = bundle20(ui, caps)
    bundle.setcompression(compression, compopts)
    _addpartsfromopts(ui, repo, bundle, source, outgoing, opts)
    chunkiter = bundle.getchunks()

    return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)


def _addpartsfromopts(ui, repo, bundler, source, outgoing, opts):
    # We should eventually reconcile this logic with the one behind
    # 'exchange.getbundle2partsgenerator'.
    #
    # The types of input from 'getbundle' and 'writenewbundle' are a bit
    # different right now. So we keep them separated for now for the sake of
    # simplicity.

    # we might not always want a changegroup in such a bundle, for example in
    # stream bundles
    if opts.get(b'changegroup', True):
        cgversion = opts.get(b'cg.version')
        if cgversion is None:
            cgversion = changegroup.safeversion(repo)
        cg = changegroup.makechangegroup(repo, outgoing, cgversion, source)
        part = bundler.newpart(b'changegroup', data=cg.getchunks())
        part.addparam(b'version', cg.version)
        if b'clcount' in cg.extras:
            part.addparam(
                b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
            )
        if opts.get(b'phases'):
            target_phase = phases.draft
            for head in outgoing.ancestorsof:
                target_phase = max(target_phase, repo[head].phase())
            if target_phase > phases.draft:
                part.addparam(
                    b'targetphase',
                    b'%d' % target_phase,
                    mandatory=False,
                )
        if repository.REPO_FEATURE_SIDE_DATA in repo.features:
            part.addparam(b'exp-sidedata', b'1')

    if opts.get(b'streamv2', False):
        addpartbundlestream2(bundler, repo, stream=True)

    if opts.get(b'streamv3-exp', False):
        addpartbundlestream2(bundler, repo, stream=True)

    if opts.get(b'tagsfnodescache', True):
        addparttagsfnodescache(repo, bundler, outgoing)

    if opts.get(b'revbranchcache', True):
        addpartrevbranchcache(repo, bundler, outgoing)

    if opts.get(b'obsolescence', False):
        obsmarkers = repo.obsstore.relevantmarkers(outgoing.missing)
        buildobsmarkerspart(
            bundler,
            obsmarkers,
            mandatory=opts.get(b'obsolescence-mandatory', True),
        )

    if opts.get(b'phases', False):
        headsbyphase = phases.subsetphaseheads(repo, outgoing.missing)
        phasedata = phases.binaryencode(headsbyphase)
        bundler.newpart(b'phase-heads', data=phasedata)


def addparttagsfnodescache(repo, bundler, outgoing):
    # we include the tags fnode cache for the bundle changeset
    # (as an optional part)
    cache = tags.hgtagsfnodescache(repo.unfiltered())
    chunks = []

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) b) we don't want to send the
    # part if we don't have entries and knowing if we have entries requires
    # cache lookups.
    for node in outgoing.ancestorsof:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode:
            chunks.extend([node, fnode])

    if chunks:
        bundler.newpart(b'hgtagsfnodes', data=b''.join(chunks))


def addpartrevbranchcache(repo, bundler, outgoing):
    # we include the rev branch cache for the bundle changeset
    # (as an optional part)
    cache = repo.revbranchcache()
    cl = repo.unfiltered().changelog
    branchesdata = collections.defaultdict(lambda: (set(), set()))
    for node in outgoing.missing:
        branch, close = cache.branchinfo(cl.rev(node))
        branchesdata[branch][close].add(node)

    def generate():
        for branch, (nodes, closed) in sorted(branchesdata.items()):
            utf8branch = encoding.fromlocal(branch)
            yield rbcstruct.pack(len(utf8branch), len(nodes), len(closed))
            yield utf8branch
            for n in sorted(nodes):
                yield n
            for n in sorted(closed):
                yield n

    bundler.newpart(b'cache:rev-branch-cache', data=generate(), mandatory=False)


def _formatrequirementsspec(requirements):
    requirements = [req for req in requirements if req != b"shared"]
    return urlreq.quote(b','.join(sorted(requirements)))


def _formatrequirementsparams(requirements):
    requirements = _formatrequirementsspec(requirements)
    params = b"%s%s" % (urlreq.quote(b"requirements="), requirements)
    return params


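# Illustrative sketch (not part of the module): the helpers above sort,
# comma-join and URL-quote the requirement names, dropping b'shared', e.g.:
#
#     _formatrequirementsspec([b'store', b'revlogv1', b'shared'])
#     # -> b'revlogv1%2Cstore'
#     _formatrequirementsparams([b'store', b'revlogv1'])
#     # -> b'requirements%3Drevlogv1%2Cstore'
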
def format_remote_wanted_sidedata(repo):
    """Formats a repo's wanted sidedata categories into a bytestring for
    capabilities exchange."""
    wanted = b""
    if repo._wanted_sidedata:
        wanted = b','.join(
            pycompat.bytestr(c) for c in sorted(repo._wanted_sidedata)
        )
    return wanted


def read_remote_wanted_sidedata(remote):
    sidedata_categories = remote.capable(b'exp-wanted-sidedata')
    return read_wanted_sidedata(sidedata_categories)


def read_wanted_sidedata(formatted):
    if formatted:
        return set(formatted.split(b','))
    return set()


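# Illustrative sketch (not part of the module): the format/read helpers above
# round-trip a set of category names through a comma-separated bytestring:
#
#     read_wanted_sidedata(b'cat1,cat2')  # -> {b'cat1', b'cat2'}
#     read_wanted_sidedata(b'')           # -> set()
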
def addpartbundlestream2(bundler, repo, **kwargs):
    if not kwargs.get('stream', False):
        return

    if not streamclone.allowservergeneration(repo):
        msg = _(b'stream data requested but server does not allow this feature')
        hint = _(b'the client seems buggy')
        raise error.Abort(msg, hint=hint)
    if b'stream' not in bundler.capabilities:
        msg = _(
            b'stream data requested but supported streaming clone versions were not specified'
        )
        hint = _(b'the client seems buggy')
        raise error.Abort(msg, hint=hint)
    client_supported = set(bundler.capabilities[b'stream'])
    server_supported = set(getrepocaps(repo, role=b'client').get(b'stream', []))
    common_supported = client_supported & server_supported
    if not common_supported:
        msg = _(b'no common supported version with the client: %s; %s')
        str_server = b','.join(sorted(server_supported))
        str_client = b','.join(sorted(client_supported))
        msg %= (str_server, str_client)
        raise error.Abort(msg)
    version = max(common_supported)

    # Stream clones don't compress well. And compression undermines a
    # goal of stream clones, which is to be fast. Communicate the desire
    # to avoid compression to consumers of the bundle.
    bundler.prefercompressed = False

    # get the includes and excludes
    includepats = kwargs.get('includepats')
    excludepats = kwargs.get('excludepats')

    narrowstream = repo.ui.configbool(
        b'experimental', b'server.stream-narrow-clones'
    )

    if (includepats or excludepats) and not narrowstream:
        raise error.Abort(_(b'server does not support narrow stream clones'))

    includeobsmarkers = False
    if repo.obsstore:
        remoteversions = obsmarkersversion(bundler.capabilities)
        if not remoteversions:
            raise error.Abort(
                _(
                    b'server has obsolescence markers, but client '
                    b'cannot receive them via stream clone'
                )
            )
        elif repo.obsstore._version in remoteversions:
            includeobsmarkers = True

    if version == b"v2":
        filecount, bytecount, it = streamclone.generatev2(
            repo, includepats, excludepats, includeobsmarkers
        )
        requirements = streamclone.streamed_requirements(repo)
        requirements = _formatrequirementsspec(requirements)
        part = bundler.newpart(b'stream2', data=it)
        part.addparam(b'bytecount', b'%d' % bytecount, mandatory=True)
        part.addparam(b'filecount', b'%d' % filecount, mandatory=True)
        part.addparam(b'requirements', requirements, mandatory=True)
    elif version == b"v3-exp":
        it = streamclone.generatev3(
            repo, includepats, excludepats, includeobsmarkers
        )
        requirements = streamclone.streamed_requirements(repo)
        requirements = _formatrequirementsspec(requirements)
        part = bundler.newpart(b'stream3-exp', data=it)
        part.addparam(b'requirements', requirements, mandatory=True)


def buildobsmarkerspart(bundler, markers, mandatory=True):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    if not markers:
        return None

    remoteversions = obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError(b'bundler does not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart(b'obsmarkers', data=stream, mandatory=mandatory)


def writebundle(
    ui, cg, filename, bundletype, vfs=None, compression=None, compopts=None
):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    if bundletype == b"HG20":
        bundle = bundle20(ui)
        bundle.setcompression(compression, compopts)
        part = bundle.newpart(b'changegroup', data=cg.getchunks())
        part.addparam(b'version', cg.version)
        if b'clcount' in cg.extras:
            part.addparam(
                b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
            )
        chunkiter = bundle.getchunks()
    else:
        # compression argument is only for the bundle2 case
        assert compression is None
        if cg.version != b'01':
            raise error.Abort(
                _(b'old bundle types only support v1 changegroups')
            )

        # HG20 is the case without 2 values to unpack, but is handled above.
        # pytype: disable=bad-unpacking
        header, comp = bundletypes[bundletype]
        # pytype: enable=bad-unpacking

        if comp not in util.compengines.supportedbundletypes:
            raise error.Abort(_(b'unknown stream compression type: %s') % comp)
        compengine = util.compengines.forbundletype(comp)

        def chunkiter():
            yield header
            for chunk in compengine.compressstream(cg.getchunks(), compopts):
                yield chunk

        chunkiter = chunkiter()

    # parse the changegroup data, otherwise we will block
    # in case of sshrepo because we don't know the end of the stream
    return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)


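# Illustrative sketch (not part of the module): both branches above share one
# entry point; assuming a ui object and an existing changegroup `cg`:
#
#     writebundle(ui, cg, b'backup.hg', b'HG20')     # bundle2 container
#     writebundle(ui, cg, b'backup.hg1', b'HG10BZ')  # legacy bz2 bundle1
#
# The legacy branch only accepts b'01' changegroups, per the check above.
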
def combinechangegroupresults(op):
    """logic to combine 0 or more addchangegroup results into one"""
    results = [r.get(b'return', 0) for r in op.records[b'changegroup']]
    changedheads = 0
    result = 1
    for ret in results:
        # If any changegroup result is 0, return 0
        if ret == 0:
            result = 0
            break
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        result = 1 + changedheads
    elif changedheads < 0:
        result = -1 + changedheads
    return result


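# Illustrative sketch (not part of the module): addchangegroup results encode
# head deltas as 1 + heads added (positive) or -1 - heads removed (negative).
# Combining two runs that each added one head:
#
#     op.records[b'changegroup'] == [{b'return': 2}, {b'return': 2}]
#     # changedheads = (2 - 1) + (2 - 1) = 2, so the combined result is
#     # 1 + changedheads == 3: success with two heads added overall.
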
@parthandler(
    b'changegroup',
    (
        b'version',
        b'nbchanges',
        b'exp-sidedata',
        b'exp-wanted-sidedata',
        b'treemanifest',
        b'targetphase',
    ),
)
def handlechangegroup(op, inpart):
    """apply a changegroup part on the repo"""
    from . import localrepo

    tr = op.gettransaction()
    unpackerversion = inpart.params.get(b'version', b'01')
    # We should raise an appropriate exception here
    cg = changegroup.getunbundler(unpackerversion, inpart, None)
    # the source and url passed here are overwritten by the ones contained in
    # the transaction.hookargs argument. So 'bundle2' is a placeholder
    nbchangesets = None
    if b'nbchanges' in inpart.params:
        nbchangesets = int(inpart.params.get(b'nbchanges'))
    if b'treemanifest' in inpart.params and not scmutil.istreemanifest(op.repo):
        if len(op.repo.changelog) != 0:
            raise error.Abort(
                _(
                    b"bundle contains tree manifests, but local repo is "
                    b"non-empty and does not use tree manifests"
                )
            )
        op.repo.requirements.add(requirements.TREEMANIFEST_REQUIREMENT)
        op.repo.svfs.options = localrepo.resolvestorevfsoptions(
            op.repo.ui, op.repo.requirements, op.repo.features
        )
        scmutil.writereporequirements(op.repo)

    extrakwargs = {}
    targetphase = inpart.params.get(b'targetphase')
    if targetphase is not None:
        extrakwargs['targetphase'] = int(targetphase)

    remote_sidedata = inpart.params.get(b'exp-wanted-sidedata')
    extrakwargs['sidedata_categories'] = read_wanted_sidedata(remote_sidedata)

    ret = _processchangegroup(
        op,
        cg,
        tr,
        op.source,
        b'bundle2',
        expectedtotal=nbchangesets,
        **extrakwargs
    )
    if op.reply is not None:
        # This is definitely not the final form of this
        # return. But one needs to start somewhere.
        part = op.reply.newpart(b'reply:changegroup', mandatory=False)
        part.addparam(
            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
        )
        part.addparam(b'return', b'%i' % ret, mandatory=False)
    assert not inpart.read()


_remotechangegroupparams = tuple(
    [b'url', b'size', b'digests']
    + [b'digest:%s' % k for k in util.DIGESTS.keys()]
)


@parthandler(b'remote-changegroup', _remotechangegroupparams)
def handleremotechangegroup(op, inpart):
    """apply a bundle10 on the repo, given a url and validation information

    All the information about the remote bundle to import is given as
    parameters. The parameters include:
      - url: the url to the bundle10.
      - size: the bundle10 file size. It is used to validate that what was
        retrieved by the client matches the server's knowledge about the
        bundle.
      - digests: a space separated list of the digest types provided as
        parameters.
      - digest:<digest-type>: the hexadecimal representation of the digest
        with that name. Like the size, it is used to validate that what was
        retrieved by the client matches what the server knows about the
        bundle.

    When multiple digest types are given, all of them are checked.
    """
    try:
        raw_url = inpart.params[b'url']
    except KeyError:
        raise error.Abort(_(b'remote-changegroup: missing "%s" param') % b'url')
    parsed_url = urlutil.url(raw_url)
    if parsed_url.scheme not in capabilities[b'remote-changegroup']:
        raise error.Abort(
            _(b'remote-changegroup does not support %s urls')
            % parsed_url.scheme
        )

    try:
        size = int(inpart.params[b'size'])
    except ValueError:
        raise error.Abort(
            _(b'remote-changegroup: invalid value for param "%s"') % b'size'
        )
    except KeyError:
        raise error.Abort(
            _(b'remote-changegroup: missing "%s" param') % b'size'
        )

    digests = {}
    for typ in inpart.params.get(b'digests', b'').split():
        param = b'digest:%s' % typ
        try:
            value = inpart.params[param]
        except KeyError:
            raise error.Abort(
                _(b'remote-changegroup: missing "%s" param') % param
            )
        digests[typ] = value

    real_part = util.digestchecker(url.open(op.ui, raw_url), size, digests)

    tr = op.gettransaction()
    from . import exchange

    cg = exchange.readbundle(op.repo.ui, real_part, raw_url)
    if not isinstance(cg, changegroup.cg1unpacker):
        raise error.Abort(
            _(b'%s: not a bundle version 1.0') % urlutil.hidepassword(raw_url)
        )
    ret = _processchangegroup(op, cg, tr, op.source, b'bundle2')
    if op.reply is not None:
        # This is definitely not the final form of this
        # return. But one needs to start somewhere.
        part = op.reply.newpart(b'reply:changegroup')
        part.addparam(
            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
        )
        part.addparam(b'return', b'%i' % ret, mandatory=False)
    try:
        real_part.validate()
    except error.Abort as e:
        raise error.Abort(
            _(b'bundle at %s is corrupted:\n%s')
            % (urlutil.hidepassword(raw_url), e.message)
        )
    assert not inpart.read()


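# Hypothetical example of the parameters described in the docstring above
# (the URL and numbers are made up):
#
#   url         = b'https://example.com/bundles/abc.hg'
#   size        = b'581120'
#   digests     = b'sha1'
#   digest:sha1 = b'<40 hexadecimal characters>'
#
# Each type listed in 'digests' must come with a matching 'digest:<type>'
# parameter; util.digestchecker verifies the size and every digest as the
# bundle streams through, and real_part.validate() reports any mismatch.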
@parthandler(b'reply:changegroup', (b'return', b'in-reply-to'))
def handlereplychangegroup(op, inpart):
    ret = int(inpart.params[b'return'])
    replyto = int(inpart.params[b'in-reply-to'])
    op.records.add(b'changegroup', {b'return': ret}, replyto)


@parthandler(b'check:bookmarks')
def handlecheckbookmarks(op, inpart):
    """check location of bookmarks

    This part is used to detect push races on bookmarks: it contains
    binary encoded (bookmark, node) tuples. If the local state does not
    match the ones in the part, a PushRaced exception is raised.
    """
    bookdata = bookmarks.binarydecode(op.repo, inpart)

    msgstandard = (
        b'remote repository changed while pushing - please try again '
        b'(bookmark "%s" move from %s to %s)'
    )
    msgmissing = (
        b'remote repository changed while pushing - please try again '
        b'(bookmark "%s" is missing, expected %s)'
    )
    msgexist = (
        b'remote repository changed while pushing - please try again '
        b'(bookmark "%s" set on %s, expected missing)'
    )
    for book, node in bookdata:
        currentnode = op.repo._bookmarks.get(book)
        if currentnode != node:
            if node is None:
                finalmsg = msgexist % (book, short(currentnode))
            elif currentnode is None:
                finalmsg = msgmissing % (book, short(node))
            else:
                finalmsg = msgstandard % (
                    book,
                    short(node),
                    short(currentnode),
                )
            raise error.PushRaced(finalmsg)


@parthandler(b'check:heads')
def handlecheckheads(op, inpart):
    """check that the heads of the repo did not change

    This is used to detect a push race when using unbundle.
    This replaces the "heads" argument of unbundle."""
    h = inpart.read(20)
    heads = []
    while len(h) == 20:
        heads.append(h)
        h = inpart.read(20)
    assert not h
    # Trigger a transaction so that we are guaranteed to have the lock now.
    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
        op.gettransaction()
    if sorted(heads) != sorted(op.repo.heads()):
        raise error.PushRaced(
            b'remote repository changed while pushing - please try again'
        )


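# The payload format shared by 'check:heads' above and 'check:updated-heads'
# below is simply concatenated 20-byte binary nodes, e.g. for two heads:
#
#   payload = head1_node + head2_node   # len(payload) == 40
#
# which is why both handlers read the part in fixed 20-byte chunks until
# the stream is exhausted.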
@parthandler(b'check:updated-heads')
def handlecheckupdatedheads(op, inpart):
    """check for races on the heads touched by a push

    This is similar to 'check:heads' but focuses on the heads actually
    updated during the push. Activity on unrelated heads is ignored.

    This allows servers with high traffic to avoid push contention as long
    as only unrelated parts of the graph are involved."""
    h = inpart.read(20)
    heads = []
    while len(h) == 20:
        heads.append(h)
        h = inpart.read(20)
    assert not h
    # trigger a transaction so that we are guaranteed to have the lock now.
    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
        op.gettransaction()

    currentheads = set()
    for ls in op.repo.branchmap().iterheads():
        currentheads.update(ls)

    for h in heads:
        if h not in currentheads:
            raise error.PushRaced(
                b'remote repository changed while pushing - '
                b'please try again'
            )


@parthandler(b'check:phases')
def handlecheckphases(op, inpart):
    """check that phase boundaries of the repository did not change

    This is used to detect a push race.
    """
    phasetonodes = phases.binarydecode(inpart)
    unfi = op.repo.unfiltered()
    cl = unfi.changelog
    phasecache = unfi._phasecache
    msg = (
        b'remote repository changed while pushing - please try again '
        b'(%s is %s expected %s)'
    )
    for expectedphase, nodes in phasetonodes.items():
        for n in nodes:
            actualphase = phasecache.phase(unfi, cl.rev(n))
            if actualphase != expectedphase:
                finalmsg = msg % (
                    short(n),
                    phases.phasenames[actualphase],
                    phases.phasenames[expectedphase],
                )
                raise error.PushRaced(finalmsg)


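# The phase names interpolated by handlecheckphases above come from
# phases.phasenames; with a hypothetical node the raced message renders
# as, for example:
#
#   remote repository changed while pushing - please try again
#   (1234567890ab is draft expected public)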
@parthandler(b'output')
def handleoutput(op, inpart):
    """forward output captured on the server to the client"""
    for line in inpart.read().splitlines():
        op.ui.status(_(b'remote: %s\n') % line)


@parthandler(b'replycaps')
def handlereplycaps(op, inpart):
    """Notify that a reply bundle should be created

    The payload contains the capabilities information for the reply"""
    caps = decodecaps(inpart.read())
    if op.reply is None:
        op.reply = bundle20(op.ui, caps)


class AbortFromPart(error.Abort):
    """Sub-class of Abort that denotes an error from a bundle2 part."""


@parthandler(b'error:abort', (b'message', b'hint'))
def handleerrorabort(op, inpart):
    """Used to transmit abort error over the wire"""
    raise AbortFromPart(
        inpart.params[b'message'], hint=inpart.params.get(b'hint')
    )


@parthandler(
    b'error:pushkey',
    (b'namespace', b'key', b'new', b'old', b'ret', b'in-reply-to'),
)
def handleerrorpushkey(op, inpart):
    """Used to transmit failure of a mandatory pushkey over the wire"""
    kwargs = {}
    for name in (b'namespace', b'key', b'new', b'old', b'ret'):
        value = inpart.params.get(name)
        if value is not None:
            kwargs[name] = value
    raise error.PushkeyFailed(
        inpart.params[b'in-reply-to'], **pycompat.strkwargs(kwargs)
    )


@parthandler(b'error:unsupportedcontent', (b'parttype', b'params'))
def handleerrorunsupportedcontent(op, inpart):
    """Used to transmit unknown content error over the wire"""
    kwargs = {}
    parttype = inpart.params.get(b'parttype')
    if parttype is not None:
        kwargs[b'parttype'] = parttype
    params = inpart.params.get(b'params')
    if params is not None:
        kwargs[b'params'] = params.split(b'\0')

    raise error.BundleUnknownFeatureError(**pycompat.strkwargs(kwargs))


@parthandler(b'error:pushraced', (b'message',))
def handleerrorpushraced(op, inpart):
    """Used to transmit push race error over the wire"""
    raise error.ResponseError(_(b'push failed:'), inpart.params[b'message'])


@parthandler(b'listkeys', (b'namespace',))
def handlelistkeys(op, inpart):
    """retrieve pushkey namespace content stored in a bundle2"""
    namespace = inpart.params[b'namespace']
    r = pushkey.decodekeys(inpart.read())
    op.records.add(b'listkeys', (namespace, r))


@parthandler(b'pushkey', (b'namespace', b'key', b'old', b'new'))
def handlepushkey(op, inpart):
    """process a pushkey request"""
    dec = pushkey.decode
    namespace = dec(inpart.params[b'namespace'])
    key = dec(inpart.params[b'key'])
    old = dec(inpart.params[b'old'])
    new = dec(inpart.params[b'new'])
    # Grab the transaction to ensure that we have the lock before performing
    # the pushkey.
    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
        op.gettransaction()
    ret = op.repo.pushkey(namespace, key, old, new)
    record = {b'namespace': namespace, b'key': key, b'old': old, b'new': new}
    op.records.add(b'pushkey', record)
    if op.reply is not None:
        rpart = op.reply.newpart(b'reply:pushkey')
        rpart.addparam(
            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
        )
        rpart.addparam(b'return', b'%i' % ret, mandatory=False)
    if inpart.mandatory and not ret:
        kwargs = {}
        for key in (b'namespace', b'key', b'new', b'old', b'ret'):
            if key in inpart.params:
                kwargs[key] = inpart.params[key]
        raise error.PushkeyFailed(
            partid=b'%d' % inpart.id, **pycompat.strkwargs(kwargs)
        )


@parthandler(b'bookmarks')
def handlebookmark(op, inpart):
    """transmit bookmark information

    The part contains binary encoded bookmark information.

    The exact behavior of this part can be controlled by the 'bookmarks' mode
    on the bundle operation.

    When mode is 'apply' (the default) the bookmark information is applied as
    is to the unbundling repository. Make sure a 'check:bookmarks' part is
    issued earlier to check for push races in such an update. This behavior
    is suitable for pushing.

    When mode is 'records', the information is recorded into the 'bookmarks'
    records of the bundle operation. This behavior is suitable for pulling.
    """
    changes = bookmarks.binarydecode(op.repo, inpart)

    pushkeycompat = op.repo.ui.configbool(
        b'server', b'bookmarks-pushkey-compat'
    )
    bookmarksmode = op.modes.get(b'bookmarks', b'apply')

    if bookmarksmode == b'apply':
        tr = op.gettransaction()
        bookstore = op.repo._bookmarks
        if pushkeycompat:
            allhooks = []
            for book, node in changes:
                hookargs = tr.hookargs.copy()
                hookargs[b'pushkeycompat'] = b'1'
                hookargs[b'namespace'] = b'bookmarks'
                hookargs[b'key'] = book
                hookargs[b'old'] = hex(bookstore.get(book, b''))
                hookargs[b'new'] = hex(node if node is not None else b'')
                allhooks.append(hookargs)

            for hookargs in allhooks:
                op.repo.hook(
                    b'prepushkey', throw=True, **pycompat.strkwargs(hookargs)
                )

        for book, node in changes:
            if bookmarks.isdivergent(book):
                msg = _(b'cannot accept divergent bookmark %s!') % book
                raise error.Abort(msg)

        bookstore.applychanges(op.repo, op.gettransaction(), changes)

        if pushkeycompat:

            def runhook(unused_success):
                for hookargs in allhooks:
                    op.repo.hook(b'pushkey', **pycompat.strkwargs(hookargs))

            op.repo._afterlock(runhook)

    elif bookmarksmode == b'records':
        for book, node in changes:
            record = {b'bookmark': book, b'node': node}
            op.records.add(b'bookmarks', record)
    else:
        raise error.ProgrammingError(
            b'unknown bookmark mode: %s' % bookmarksmode
        )


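# For instance, a pulling client wants incoming bookmark moves recorded
# rather than applied; the bundle operation is then set up (hypothetical
# call site) with op.modes[b'bookmarks'] = b'records', and handlebookmark
# above stores each move in op.records[b'bookmarks'] as
#
#   {b'bookmark': b'feature-x', b'node': <20-byte node, or None>}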
@parthandler(b'phase-heads')
def handlephases(op, inpart):
    """apply phases from bundle part to repo"""
    headsbyphase = phases.binarydecode(inpart)
    phases.updatephases(op.repo.unfiltered(), op.gettransaction, headsbyphase)


@parthandler(b'reply:pushkey', (b'return', b'in-reply-to'))
def handlepushkeyreply(op, inpart):
    """retrieve the result of a pushkey request"""
    ret = int(inpart.params[b'return'])
    partid = int(inpart.params[b'in-reply-to'])
    op.records.add(b'pushkey', {b'return': ret}, partid)


@parthandler(b'obsmarkers')
def handleobsmarker(op, inpart):
    """add a stream of obsmarkers to the repo"""
    tr = op.gettransaction()
    markerdata = inpart.read()
    if op.ui.config(b'experimental', b'obsmarkers-exchange-debug'):
        op.ui.writenoi18n(
            b'obsmarker-exchange: %i bytes received\n' % len(markerdata)
        )
    # The mergemarkers call will crash if marker creation is not enabled.
    # We want to avoid this if the part is advisory.
    if not inpart.mandatory and op.repo.obsstore.readonly:
        op.repo.ui.debug(
            b'ignoring obsolescence markers, feature not enabled\n'
        )
        return
    new = op.repo.obsstore.mergemarkers(tr, markerdata)
    op.repo.invalidatevolatilesets()
    op.records.add(b'obsmarkers', {b'new': new})
    if op.reply is not None:
        rpart = op.reply.newpart(b'reply:obsmarkers')
        rpart.addparam(
            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
        )
        rpart.addparam(b'new', b'%i' % new, mandatory=False)


@parthandler(b'reply:obsmarkers', (b'new', b'in-reply-to'))
def handleobsmarkerreply(op, inpart):
    """retrieve the result of an obsmarkers request"""
    ret = int(inpart.params[b'new'])
    partid = int(inpart.params[b'in-reply-to'])
    op.records.add(b'obsmarkers', {b'new': ret}, partid)


@parthandler(b'hgtagsfnodes')
def handlehgtagsfnodes(op, inpart):
    """Applies .hgtags fnodes cache entries to the local repo.

    Payload is pairs of 20 byte changeset nodes and filenodes.
    """
    # Grab the transaction so we ensure that we have the lock at this point.
    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
        op.gettransaction()
    cache = tags.hgtagsfnodescache(op.repo.unfiltered())

    count = 0
    while True:
        node = inpart.read(20)
        fnode = inpart.read(20)
        if len(node) < 20 or len(fnode) < 20:
            op.ui.debug(b'ignoring incomplete received .hgtags fnodes data\n')
            break
        cache.setfnode(node, fnode)
        count += 1

    cache.write()
    op.ui.debug(b'applied %i hgtags fnodes cache entries\n' % count)


rbcstruct = struct.Struct(b'>III')


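# rbcstruct (above) describes one record of the legacy rev-branch-cache
# exchange format: three big-endian unsigned 32-bit integers ('>III'),
# i.e. 12 bytes per record. The 'cache:rev-branch-cache' part that carried
# records in this format is now ignored, as the handler below explains.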
@parthandler(b'cache:rev-branch-cache')
def handlerbc(op, inpart):
    """Legacy part, ignored for compatibility with bundles from or
    for Mercurial before 5.7. Newer Mercurial computes the cache
    efficiently enough during unbundling that the additional transfer
    is unnecessary."""


@parthandler(b'pushvars')
def bundle2getvars(op, part):
    '''unbundle a bundle2 containing shellvars on the server'''
    # An option to disable unbundling on server-side for security reasons
    if op.ui.configbool(b'push', b'pushvars.server'):
        hookargs = {}
        for key, value in part.advisoryparams:
            key = key.upper()
            # We want pushed variables to have USERVAR_ prepended so we know
            # they came from the --pushvar flag.
            key = b"USERVAR_" + key
            hookargs[key] = value
        op.addhookargs(hookargs)


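# For example, a client running the (hypothetical) command
#
#   hg push --pushvar DEBUG=1
#
# would, if the server sets `push.pushvars.server = true`, surface the
# variable to server-side hooks as the hook argument USERVAR_DEBUG='1'
# (the handler above uppercases the name and prepends USERVAR_; shell
# hooks see it with the usual HG_ environment prefix).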
@parthandler(b'stream2', (b'requirements', b'filecount', b'bytecount'))
def handlestreamv2bundle(op, part):
    requirements = urlreq.unquote(part.params[b'requirements'])
    requirements = requirements.split(b',') if requirements else []
    filecount = int(part.params[b'filecount'])
    bytecount = int(part.params[b'bytecount'])

    repo = op.repo
    if len(repo):
        msg = _(b'cannot apply stream clone to non empty repository')
        raise error.Abort(msg)

    repo.ui.debug(b'applying stream bundle\n')
    streamclone.applybundlev2(repo, part, filecount, bytecount, requirements)


@parthandler(b'stream3-exp', (b'requirements',))
def handlestreamv3bundle(op, part):
    requirements = urlreq.unquote(part.params[b'requirements'])
    requirements = requirements.split(b',') if requirements else []

    repo = op.repo
    if len(repo):
        msg = _(b'cannot apply stream clone to non empty repository')
        raise error.Abort(msg)

    repo.ui.debug(b'applying stream bundle\n')
    streamclone.applybundlev3(repo, part, requirements)


def widen_bundle(
    bundler, repo, oldmatcher, newmatcher, common, known, cgversion, ellipses
):
    """generates a bundle2 for widening a narrow clone

    bundler is the bundle to which data should be added
    repo is the localrepository instance
    oldmatcher matches what the client already has
    newmatcher matches what the client needs (including what it already has)
    common is the set of common heads between server and client
    known is a set of revs known on the client side (used in ellipses)
    cgversion is the changegroup version to send
    ellipses is a boolean telling whether to send ellipsis data or not

    returns a bundle2 containing the data required for extending
    """
    commonnodes = set()
    cl = repo.changelog
    for r in repo.revs(b"::%ln", common):
        commonnodes.add(cl.node(r))
    if commonnodes:
        packer = changegroup.getbundler(
            cgversion,
            repo,
            oldmatcher=oldmatcher,
            matcher=newmatcher,
            fullnodes=commonnodes,
        )
        cgdata = packer.generate(
            {repo.nullid},
            list(commonnodes),
            False,
            b'narrow_widen',
            changelog=False,
        )

        part = bundler.newpart(b'changegroup', data=cgdata)
        part.addparam(b'version', cgversion)
        if scmutil.istreemanifest(repo):
            part.addparam(b'treemanifest', b'1')
        if repository.REPO_FEATURE_SIDE_DATA in repo.features:
            part.addparam(b'exp-sidedata', b'1')
            wanted = format_remote_wanted_sidedata(repo)
            part.addparam(b'exp-wanted-sidedata', wanted)

    return bundler
@@ -1,2839 +1,2845 b''
# configitems.toml - centralized declaration of configuration options
#
# This file contains declarations of the core Mercurial configuration options.
#
# # Structure
#
# items: array of config items
# templates: mapping of template name to template declaration
# template-applications: array of template applications
#
# # Elements
#
# ## Item
#
# Declares a core Mercurial option.
#
# - section: string (required)
# - name: string (required)
# - default-type: boolean, changes how `default` is read
# - default: any
# - generic: boolean
# - priority: integer, only if `generic` is true
# - alias: list of 2-tuples of strings
# - experimental: boolean
# - documentation: string
# - in_core_extension: string
#
# ## Template
#
# Declares a group of options to be re-used for multiple sections.
#
# - all the same fields as `Item`, except `section` and `name`
# - `suffix` (string, required)
#
# ## Template applications
#
# Uses a `Template` to instantiate its options in a given section.
#
# - template: string (required, must match a `Template` name)
# - section: string (required)

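# An illustrative, commented-out item showing the optional fields listed
# above (the option itself is made up and declares nothing):
#
# [[items]]
# section = "example-section"
# name = "example-option"
# default = false
# experimental = true
# documentation = "A hypothetical option shown only to illustrate the fields."
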
[[items]]
section = "alias"
name = ".*"
default-type = "dynamic"
generic = true

[[items]]
section = "auth"
name = "cookiefile"

# bookmarks.pushing: internal hack for discovery
[[items]]
section = "bookmarks"
name = "pushing"
default-type = "list_type"

# bundle.mainreporoot: internal hack for bundlerepo
[[items]]
section = "bundle"
name = "mainreporoot"
default = ""

[[items]]
section = "censor"
name = "policy"
default = "abort"
experimental = true

[[items]]
section = "chgserver"
name = "idletimeout"
default = 3600

[[items]]
section = "chgserver"
name = "skiphash"
default = false

[[items]]
section = "cmdserver"
name = "log"

[[items]]
section = "cmdserver"
name = "max-log-files"
default = 7

[[items]]
section = "cmdserver"
name = "max-log-size"
default = "1 MB"

[[items]]
section = "cmdserver"
name = "max-repo-cache"
default = 0
experimental = true

[[items]]
section = "cmdserver"
name = "message-encodings"
default-type = "list_type"

[[items]]
section = "cmdserver"
name = "shutdown-on-interrupt"
default = true

[[items]]
section = "cmdserver"
name = "track-log"
default-type = "lambda"
default = [ "chgserver", "cmdserver", "repocache",]

[[items]]
section = "color"
name = ".*"
generic = true

[[items]]
section = "color"
name = "mode"
default = "auto"

[[items]]
section = "color"
name = "pagermode"
default-type = "dynamic"

[[items]]
section = "command-templates"
name = "graphnode"
alias = [["ui", "graphnodetemplate"]]

[[items]]
section = "command-templates"
name = "log"
alias = [["ui", "logtemplate"]]

[[items]]
section = "command-templates"
name = "mergemarker"
default = '{node|short} {ifeq(tags, "tip", "", ifeq(tags, "", "", "{tags} "))}{if(bookmarks, "{bookmarks} ")}{ifeq(branch, "default", "", "{branch} ")}- {author|user}: {desc|firstline}'
alias = [["ui", "mergemarkertemplate"]]

[[items]]
section = "command-templates"
name = "oneline-summary"

[[items]]
section = "command-templates"
name = "oneline-summary.*"
default-type = "dynamic"
generic = true

[[items]]
section = "command-templates"
name = "pre-merge-tool-output"
alias = [["ui", "pre-merge-tool-output-template"]]

[[items]]
section = "commands"
name = "commit.post-status"
default = false

[[items]]
section = "commands"
name = "grep.all-files"
default = false
experimental = true

[[items]]
section = "commands"
name = "merge.require-rev"
default = false

[[items]]
section = "commands"
name = "push.require-revs"
default = false

# Rebase related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the rebase extension to
# reuse some bits without formally loading it.
186 [[items]]
186 [[items]]
187 section = "commands"
187 section = "commands"
188 name = "rebase.requiredest"
188 name = "rebase.requiredest"
189 default = false
189 default = false
190
190
191 [[items]]
191 [[items]]
192 section = "commands"
192 section = "commands"
193 name = "resolve.confirm"
193 name = "resolve.confirm"
194 default = false
194 default = false
195
195
196 [[items]]
196 [[items]]
197 section = "commands"
197 section = "commands"
198 name = "resolve.explicit-re-merge"
198 name = "resolve.explicit-re-merge"
199 default = false
199 default = false
200
200
201 [[items]]
201 [[items]]
202 section = "commands"
202 section = "commands"
203 name = "resolve.mark-check"
203 name = "resolve.mark-check"
204 default = "none"
204 default = "none"
205
205
206 [[items]]
206 [[items]]
207 section = "commands"
207 section = "commands"
208 name = "show.aliasprefix"
208 name = "show.aliasprefix"
209 default-type = "list_type"
209 default-type = "list_type"
210
210
211 [[items]]
211 [[items]]
212 section = "commands"
212 section = "commands"
213 name = "status.relative"
213 name = "status.relative"
214 default = false
214 default = false
215
215
216 [[items]]
216 [[items]]
217 section = "commands"
217 section = "commands"
218 name = "status.skipstates"
218 name = "status.skipstates"
219 default = []
219 default = []
220 experimental = true
220 experimental = true
221
221
222 [[items]]
222 [[items]]
223 section = "commands"
223 section = "commands"
224 name = "status.terse"
224 name = "status.terse"
225 default = ""
225 default = ""
226
226
227 [[items]]
227 [[items]]
228 section = "commands"
228 section = "commands"
229 name = "status.verbose"
229 name = "status.verbose"
230 default = false
230 default = false
231
231
232 [[items]]
232 [[items]]
233 section = "commands"
233 section = "commands"
234 name = "update.check"
234 name = "update.check"
235
235
236 [[items]]
236 [[items]]
237 section = "commands"
237 section = "commands"
238 name = "update.requiredest"
238 name = "update.requiredest"
239 default = false
239 default = false
240
240
241 [[items]]
241 [[items]]
242 section = "committemplate"
242 section = "committemplate"
243 name = ".*"
243 name = ".*"
244 generic = true
244 generic = true
245
245
246 [[items]]
246 [[items]]
247 section = "convert"
247 section = "convert"
248 name = "bzr.saverev"
248 name = "bzr.saverev"
249 default = true
249 default = true
250
250
251 [[items]]
251 [[items]]
252 section = "convert"
252 section = "convert"
253 name = "cvsps.cache"
253 name = "cvsps.cache"
254 default = true
254 default = true
255
255
256 [[items]]
256 [[items]]
257 section = "convert"
257 section = "convert"
258 name = "cvsps.fuzz"
258 name = "cvsps.fuzz"
259 default = 60
259 default = 60
260
260
261 [[items]]
261 [[items]]
262 section = "convert"
262 section = "convert"
263 name = "cvsps.logencoding"
263 name = "cvsps.logencoding"
264
264
265 [[items]]
265 [[items]]
266 section = "convert"
266 section = "convert"
267 name = "cvsps.mergefrom"
267 name = "cvsps.mergefrom"
268
268
269 [[items]]
269 [[items]]
270 section = "convert"
270 section = "convert"
271 name = "cvsps.mergeto"
271 name = "cvsps.mergeto"
272
272
273 [[items]]
273 [[items]]
274 section = "convert"
274 section = "convert"
275 name = "git.committeractions"
275 name = "git.committeractions"
276 default-type = "lambda"
276 default-type = "lambda"
277 default = [ "messagedifferent",]
277 default = [ "messagedifferent",]
278
278
279 [[items]]
279 [[items]]
280 section = "convert"
280 section = "convert"
281 name = "git.extrakeys"
281 name = "git.extrakeys"
282 default-type = "list_type"
282 default-type = "list_type"
283
283
284 [[items]]
284 [[items]]
285 section = "convert"
285 section = "convert"
286 name = "git.findcopiesharder"
286 name = "git.findcopiesharder"
287 default = false
287 default = false
288
288
289 [[items]]
289 [[items]]
290 section = "convert"
290 section = "convert"
291 name = "git.remoteprefix"
291 name = "git.remoteprefix"
292 default = "remote"
292 default = "remote"
293
293
294 [[items]]
294 [[items]]
295 section = "convert"
295 section = "convert"
name = "git.renamelimit"
default = 400

[[items]]
section = "convert"
name = "git.saverev"
default = true

[[items]]
section = "convert"
name = "git.similarity"
default = 50

[[items]]
section = "convert"
name = "git.skipsubmodules"
default = false

[[items]]
section = "convert"
name = "hg.clonebranches"
default = false

[[items]]
section = "convert"
name = "hg.ignoreerrors"
default = false

[[items]]
section = "convert"
name = "hg.preserve-hash"
default = false

[[items]]
section = "convert"
name = "hg.revs"

[[items]]
section = "convert"
name = "hg.saverev"
default = false

[[items]]
section = "convert"
name = "hg.sourcename"

[[items]]
section = "convert"
name = "hg.startrev"

[[items]]
section = "convert"
name = "hg.tagsbranch"
default = "default"

[[items]]
section = "convert"
name = "hg.usebranchnames"
default = true

[[items]]
section = "convert"
name = "ignoreancestorcheck"
default = false
experimental = true

[[items]]
section = "convert"
name = "localtimezone"
default = false

[[items]]
section = "convert"
name = "p4.encoding"
default-type = "dynamic"

[[items]]
section = "convert"
name = "p4.startrev"
default = 0

[[items]]
section = "convert"
name = "skiptags"
default = false

[[items]]
section = "convert"
name = "svn.branches"

[[items]]
section = "convert"
name = "svn.dangerous-set-commit-dates"
default = false

[[items]]
section = "convert"
name = "svn.debugsvnlog"
default = true

[[items]]
section = "convert"
name = "svn.startrev"
default = 0

[[items]]
section = "convert"
name = "svn.tags"

[[items]]
section = "convert"
name = "svn.trunk"

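# Illustrative hgrc snippet (values are examples, not recommendations) showing
# how end users set the `convert.*` items declared above:
#
#   [convert]
#   git.similarity = 90
#   hg.usebranchnames = False
#   svn.trunk = trunk
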
[[items]]
section = "debug"
name = "bundling-stats"
default = false
documentation = "Display extra information about the bundling process."

[[items]]
section = "debug"
name = "dirstate.delaywrite"
default = 0

[[items]]
section = "debug"
name = "revlog.debug-delta"
default = false

[[items]]
section = "debug"
name = "revlog.verifyposition.changelog"
default = ""

[[items]]
section = "debug"
name = "unbundling-stats"
default = false
documentation = "Display extra information about the unbundling process."

[[items]]
section = "defaults"
name = ".*"
generic = true

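# The generic `defaults.*` item matches any command name; a user hgrc such as
# the sketch below (example value) prepends the given options to that command:
#
#   [defaults]
#   log = -v
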
[[items]]
section = "devel"
name = "all-warnings"
default = false

[[items]]
section = "devel"
name = "bundle.delta"
default = ""

[[items]]
section = "devel"
name = "bundle2.debug"
default = false

[[items]]
section = "devel"
name = "cache-vfs"

[[items]]
section = "devel"
name = "check-locks"
default = false

[[items]]
section = "devel"
name = "check-relroot"
default = false

[[items]]
section = "devel"
name = "copy-tracing.multi-thread"
default = true

# Track copy information for all files, not just "added" ones (very slow)
[[items]]
section = "devel"
name = "copy-tracing.trace-all-files"
default = false

[[items]]
section = "devel"
name = "debug.abort-update"
default = false
documentation = """If true, then any merge with the working copy, \
e.g. [hg update], will be aborted after figuring out what needs to be done, \
but before spawning the parallel worker."""

[[items]]
section = "devel"
name = "debug.copies"
default = false

[[items]]
section = "devel"
name = "debug.extensions"
default = false

[[items]]
section = "devel"
name = "debug.peer-request"
default = false

[[items]]
section = "devel"
name = "debug.repo-filters"
default = false

[[items]]
section = "devel"
name = "default-date"

[[items]]
section = "devel"
name = "deprec-warn"
default = false

# possible values:
# - auto (the default)
# - force-append
# - force-new
[[items]]
section = "devel"
name = "dirstate.v2.data_update_mode"
default = "auto"

[[items]]
section = "devel"
name = "disableloaddefaultcerts"
default = false

[[items]]
section = "devel"
name = "discovery.exchange-heads"
default = true
documentation = """If false, the discovery will not start with remote \
head fetching and local head querying."""

[[items]]
section = "devel"
name = "discovery.grow-sample"
default = true
documentation = """If false, the sample size used in set discovery \
will not be increased through the process."""

[[items]]
section = "devel"
name = "discovery.grow-sample.dynamic"
default = true
documentation = """If true (the default), the sample size is adapted to the shape \
of the undecided set. It is set to the max of:
`<target-size>, len(roots(undecided)), len(heads(undecided))`"""

[[items]]
section = "devel"
name = "discovery.grow-sample.rate"
default = 1.05
documentation = "Controls the rate at which the sample grows."

[[items]]
section = "devel"
name = "discovery.randomize"
default = true
documentation = """If false, random samplings during discovery are deterministic. \
It is meant for integration tests."""

[[items]]
section = "devel"
name = "discovery.sample-size"
default = 200
documentation = "Controls the initial size of the discovery sample."

[[items]]
section = "devel"
name = "discovery.sample-size.initial"
default = 100
documentation = "Controls the initial size of the discovery sample for the initial exchange."

[[items]]
section = "devel"
name = "legacy.exchange"
default-type = "list_type"

[[items]]
section = "devel"
name = "persistent-nodemap"
default = false
documentation = """When true, revlogs use a special reference version of the \
nodemap, that is not performant but is "known" to behave properly."""

[[items]]
section = "devel"
name = "server-insecure-exact-protocol"
default = ""

[[items]]
section = "devel"
name = "servercafile"
default = ""

[[items]]
section = "devel"
name = "serverexactprotocol"
default = ""

[[items]]
section = "devel"
name = "serverrequirecert"
default = false

[[items]]
section = "devel"
name = "strip-obsmarkers"
default = true

[[items]]
section = "devel"
name = "sync.status.pre-dirstate-write-file"
documentation = """
Makes the status algorithm wait for the existence of this file \
(or until a timeout of `devel.sync.status.pre-dirstate-write-file-timeout` \
seconds) before taking the lock and writing the dirstate. \
Status signals that it's ready to wait by creating a file \
with the same name + `.waiting`. \
Useful when testing race conditions."""

[[items]]
section = "devel"
name = "sync.status.pre-dirstate-write-file-timeout"
default = 2

[[items]]
section = "devel"
name = "sync.dirstate.post-docket-read-file"

[[items]]
section = "devel"
name = "sync.dirstate.post-docket-read-file-timeout"
default = 2

[[items]]
section = "devel"
name = "sync.dirstate.pre-read-file"

[[items]]
section = "devel"
name = "sync.dirstate.pre-read-file-timeout"
default = 2

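# A sketch of how the `devel.sync.*` knobs above can be used when reproducing
# dirstate races (the file path is illustrative):
#
#   hg status --config devel.sync.status.pre-dirstate-write-file=/tmp/hg-sync
#
# Per the documentation above, the command then blocks until `/tmp/hg-sync`
# exists (or the timeout expires), letting a test interleave a competing
# writer at a known point.
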
[[items]]
section = "devel"
name = "user.obsmarker"

[[items]]
section = "devel"
name = "warn-config"

[[items]]
section = "devel"
name = "warn-config-default"

[[items]]
section = "devel"
name = "warn-config-unknown"

[[items]]
section = "devel"
name = "warn-empty-changegroup"
default = false

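# The `devel.*` items are not meant for regular configuration files; they are
# typically passed per invocation, e.g. (illustrative):
#
#   hg commit -m msg --config devel.default-date='0 0'
#   hg push --config devel.debug.peer-request=yes
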
[[items]]
section = "diff"
name = "merge"
default = false
experimental = true

[[items]]
section = "email"
name = "bcc"

[[items]]
section = "email"
name = "cc"

[[items]]
section = "email"
name = "charsets"
default-type = "list_type"

[[items]]
section = "email"
name = "from"

[[items]]
section = "email"
name = "method"
default = "smtp"

[[items]]
section = "email"
name = "reply-to"

[[items]]
section = "email"
name = "to"

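# Example hgrc wiring for the `email.*` items above (addresses and host are
# placeholders):
#
#   [email]
#   from = Committer Name <committer@example.com>
#   method = smtp
#
#   [smtp]
#   host = mail.example.com
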
[[items]]
section = "experimental"
name = "archivemetatemplate"
default-type = "dynamic"

[[items]]
section = "experimental"
name = "auto-publish"
default = "publish"

[[items]]
section = "experimental"
name = "bundle-phases"
default = false

[[items]]
section = "experimental"
name = "bundle2-advertise"
default = true

[[items]]
section = "experimental"
name = "bundle2-output-capture"
default = false

[[items]]
section = "experimental"
name = "bundle2.pushback"
default = false

[[items]]
section = "experimental"
name = "bundle2lazylocking"
default = false

[[items]]
section = "experimental"
name = "bundlecomplevel"

[[items]]
section = "experimental"
name = "bundlecomplevel.bzip2"

[[items]]
section = "experimental"
name = "bundlecomplevel.gzip"

[[items]]
section = "experimental"
name = "bundlecomplevel.none"

[[items]]
section = "experimental"
name = "bundlecomplevel.zstd"

[[items]]
section = "experimental"
name = "bundlecompthreads"

[[items]]
section = "experimental"
name = "bundlecompthreads.bzip2"

[[items]]
section = "experimental"
name = "bundlecompthreads.gzip"

[[items]]
section = "experimental"
name = "bundlecompthreads.none"

[[items]]
section = "experimental"
name = "bundlecompthreads.zstd"

[[items]]
section = "experimental"
name = "changegroup3"
default = true

[[items]]
section = "experimental"
name = "changegroup4"
default = false

# might remove rank configuration once the computation has no impact
[[items]]
section = "experimental"
name = "changelog-v2.compute-rank"
default = true

[[items]]
section = "experimental"
name = "cleanup-as-archived"
default = false

[[items]]
section = "experimental"
name = "clientcompressionengines"
default-type = "list_type"

[[items]]
section = "experimental"
name = "copies.read-from"
default = "filelog-only"

[[items]]
section = "experimental"
name = "copies.write-to"
default = "filelog-only"

[[items]]
section = "experimental"
name = "copytrace"
default = "on"

[[items]]
section = "experimental"
name = "copytrace.movecandidateslimit"
default = 100

[[items]]
section = "experimental"
name = "copytrace.sourcecommitlimit"
default = 100

[[items]]
section = "experimental"
name = "crecordtest"

[[items]]
section = "experimental"
name = "directaccess"
default = false

[[items]]
section = "experimental"
name = "directaccess.revnums"
default = false

[[items]]
section = "experimental"
name = "editortmpinhg"
default = false

[[items]]
section = "experimental"
name = "evolution"
default-type = "list_type"

[[items]]
section = "experimental"
name = "evolution.allowdivergence"
default = false
alias = [["experimental", "allowdivergence"]]

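# The `alias` field above keeps a retired spelling working: a value set under
# `experimental.allowdivergence` is still consulted when reading
# `experimental.evolution.allowdivergence`, e.g. (illustrative):
#
#   hg rebase --config experimental.allowdivergence=yes
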
[[items]]
section = "experimental"
name = "evolution.allowunstable"

[[items]]
section = "experimental"
name = "evolution.bundle-obsmarker"
default = false

[[items]]
section = "experimental"
name = "evolution.bundle-obsmarker:mandatory"
default = true

[[items]]
section = "experimental"
name = "evolution.createmarkers"

[[items]]
section = "experimental"
name = "evolution.effect-flags"
default = true
alias = [["experimental", "effect-flags"]]

[[items]]
section = "experimental"
name = "evolution.exchange"

[[items]]
section = "experimental"
name = "evolution.report-instabilities"
default = true

[[items]]
section = "experimental"
name = "evolution.track-operation"
default = true

[[items]]
section = "experimental"
name = "exportableenviron"
default-type = "list_type"

[[items]]
section = "experimental"
name = "extendedheader.index"

[[items]]
section = "experimental"
name = "extendedheader.similarity"
default = false

[[items]]
section = "experimental"
name = "extra-filter-revs"
documentation = """Repo-level config to prevent a revset from being visible.
The target use case is to use `share` to expose different subsets of the same \
repository, especially server side. See also `server.view`."""

[[items]]
section = "experimental"
name = "graphshorten"
default = false

[[items]]
section = "experimental"
name = "graphstyle.grandparent"
default-type = "dynamic"

[[items]]
section = "experimental"
name = "graphstyle.missing"
default-type = "dynamic"

[[items]]
section = "experimental"
name = "graphstyle.parent"
default-type = "dynamic"

[[items]]
section = "experimental"
name = "hook-track-tags"
default = false

[[items]]
section = "experimental"
name = "httppostargs"
default = false

[[items]]
section = "experimental"
name = "log.topo"
default = false

[[items]]
section = "experimental"
name = "maxdeltachainspan"
default = -1

[[items]]
section = "experimental"
name = "merge-track-salvaged"
default = false
documentation = """Tracks files which were undeleted (merge might delete them \
but we explicitly kept/undeleted them) and creates new filenodes for them."""

[[items]]
section = "experimental"
name = "merge.checkpathconflicts"
default = false

[[items]]
section = "experimental"
name = "mmapindexthreshold"

[[items]]
section = "experimental"
name = "narrow"
default = false

[[items]]
section = "experimental"
name = "nointerrupt"
default = false

[[items]]
section = "experimental"
name = "nointerrupt-interactiveonly"
default = true

[[items]]
section = "experimental"
name = "nonnormalparanoidcheck"
default = false

[[items]]
section = "experimental"
name = "obsmarkers-exchange-debug"
default = false

[[items]]
section = "experimental"
name = "rebaseskipobsolete"
default = true

[[items]]
section = "experimental"
name = "remotenames"
default = false

[[items]]
section = "experimental"
name = "removeemptydirs"
default = true

[[items]]
section = "experimental"
name = "revert.interactive.select-to-keep"
default = false

[[items]]
section = "experimental"
name = "revisions.disambiguatewithin"

[[items]]
section = "experimental"
name = "revisions.prefixhexnode"
default = false

1032 # "out of experimental" todo list.
1032 # "out of experimental" todo list.
1033 #
1033 #
1034 # * include management of a persistent nodemap in the main docket
1034 # * include management of a persistent nodemap in the main docket
1035 # * enforce a "no-truncate" policy for mmap safety
1035 # * enforce a "no-truncate" policy for mmap safety
1036 # - for censoring operation
1036 # - for censoring operation
1037 # - for stripping operation
1037 # - for stripping operation
1038 # - for rollback operation
1038 # - for rollback operation
1039 # * proper streaming (race free) of the docket file
1039 # * proper streaming (race free) of the docket file
1040 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1040 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1041 # * Exchange-wise, we will also need to do something more efficient than
1041 # * Exchange-wise, we will also need to do something more efficient than
1042 # keeping references to the affected revlogs, especially memory-wise when
1042 # keeping references to the affected revlogs, especially memory-wise when
1043 # rewriting sidedata.
1043 # rewriting sidedata.
1044 # * introduce a proper solution to reduce the number of filelog related files.
1044 # * introduce a proper solution to reduce the number of filelog related files.
1045 # * use caching for reading sidedata (similar to what we do for data).
1045 # * use caching for reading sidedata (similar to what we do for data).
1046 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1046 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1047 # * Improvement to consider
1047 # * Improvement to consider
1048 # - avoid compression header in chunk using the default compression?
1048 # - avoid compression header in chunk using the default compression?
1049 # - forbid "inline" compression mode entirely?
1049 # - forbid "inline" compression mode entirely?
1050 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1050 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1051 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1051 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1052 # - keep track of chain base or size (probably not that useful anymore)
1052 # - keep track of chain base or size (probably not that useful anymore)
1053 [[items]]
1053 [[items]]
1054 section = "experimental"
1054 section = "experimental"
1055 name = "revlogv2"
1055 name = "revlogv2"
1056
1056
1057 [[items]]
1057 [[items]]
1058 section = "experimental"
1058 section = "experimental"
1059 name = "rust.index"
1059 name = "rust.index"
1060 default = false
1060 default = false
1061
1061
1062 [[items]]
1062 [[items]]
1063 section = "experimental"
1063 section = "experimental"
1064 name = "server.allow-hidden-access"
1064 name = "server.allow-hidden-access"
1065 default-type = "list_type"
1065 default-type = "list_type"
1066
1066
1067 [[items]]
1067 [[items]]
1068 section = "experimental"
1068 section = "experimental"
1069 name = "server.filesdata.recommended-batch-size"
1069 name = "server.filesdata.recommended-batch-size"
1070 default = 50000
1070 default = 50000
1071
1071
1072 [[items]]
1072 [[items]]
1073 section = "experimental"
1073 section = "experimental"
1074 name = "server.manifestdata.recommended-batch-size"
1074 name = "server.manifestdata.recommended-batch-size"
1075 default = 100000
1075 default = 100000
1076
1076
1077 [[items]]
1077 [[items]]
1078 section = "experimental"
1078 section = "experimental"
1079 name = "server.stream-narrow-clones"
1079 name = "server.stream-narrow-clones"
1080 default = false
1080 default = false
1081
1081
1082 [[items]]
1082 [[items]]
1083 section = "experimental"
1083 section = "experimental"
1084 name = "single-head-per-branch"
1084 name = "single-head-per-branch"
1085 default = false
1085 default = false
1086
1086
1087 [[items]]
1087 [[items]]
1088 section = "experimental"
1088 section = "experimental"
1089 name = "single-head-per-branch:account-closed-heads"
1089 name = "single-head-per-branch:account-closed-heads"
1090 default = false
1090 default = false
1091
1091
1092 [[items]]
1092 [[items]]
1093 section = "experimental"
1093 section = "experimental"
1094 name = "single-head-per-branch:public-changes-only"
1094 name = "single-head-per-branch:public-changes-only"
1095 default = false
1095 default = false
1096
1096
1097 [[items]]
1097 [[items]]
1098 section = "experimental"
1098 section = "experimental"
1099 name = "sparse-read"
1099 name = "sparse-read"
1100 default = false
1100 default = false
1101
1101
1102 [[items]]
1102 [[items]]
1103 section = "experimental"
1103 section = "experimental"
1104 name = "sparse-read.density-threshold"
1104 name = "sparse-read.density-threshold"
1105 default = 0.5
1105 default = 0.5
1106
1106
1107 [[items]]
1107 [[items]]
1108 section = "experimental"
1108 section = "experimental"
1109 name = "sparse-read.min-gap-size"
1109 name = "sparse-read.min-gap-size"
1110 default = "65K"
1110 default = "65K"
1111
1111
1112 [[items]]
1112 [[items]]
1113 section = "experimental"
1113 section = "experimental"
1114 name = "stream-v3"
1114 name = "stream-v3"
1115 default = false
1115 default = false
1116
1116
1117 [[items]]
1117 [[items]]
1118 section = "experimental"
1118 section = "experimental"
1119 name = "treemanifest"
1119 name = "treemanifest"
1120 default = false
1120 default = false
1121
1121
1122 [[items]]
1122 [[items]]
1123 section = "experimental"
1123 section = "experimental"
1124 name = "update.atomic-file"
1124 name = "update.atomic-file"
1125 default = false
1125 default = false
1126
1126
1127 [[items]]
1127 [[items]]
1128 section = "experimental"
1128 section = "experimental"
1129 name = "web.full-garbage-collection-rate"
1129 name = "web.full-garbage-collection-rate"
1130 default = 1 # still forcing a full collection on each request
1130 default = 1 # still forcing a full collection on each request
1131
1131
1132 [[items]]
1132 [[items]]
1133 section = "experimental"
1133 section = "experimental"
1134 name = "worker.repository-upgrade"
1134 name = "worker.repository-upgrade"
1135 default = false
1135 default = false
1136
1136
1137 [[items]]
1137 [[items]]
1138 section = "experimental"
1138 section = "experimental"
1139 name = "worker.wdir-get-thread-safe"
1139 name = "worker.wdir-get-thread-safe"
1140 default = false
1140 default = false
1141
1141
1142 [[items]]
1142 [[items]]
1143 section = "experimental"
1143 section = "experimental"
1144 name = "xdiff"
1144 name = "xdiff"
1145 default = false
1145 default = false
1146
1146
[[items]]
section = "extdata"
name = ".*"
generic = true

[[items]]
section = "extensions"
name = "[^:]*"
generic = true

[[items]]
section = "extensions"
name = "[^:]*:required"
default = false
generic = true

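# Sketch of hgrc usage for the `extensions` patterns above (the extension name
# and path are placeholders; the `:required` suffix is the one declared just
# above and, as I understand it, makes a load failure abort instead of warn):
#
#   [extensions]
#   myext = /path/to/myext.py
#   myext:required = yes
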
[[items]]
section = "format"
name = "bookmarks-in-store"
default = false

[[items]]
section = "format"
name = "chunkcachesize"
experimental = true

[[items]]
section = "format"
name = "dotencode"
default = true

# The interaction between the archived phase and obsolescence markers needs to
# be sorted out before wider usage of this is to be considered.
#
# At the time this message is written, behavior when archiving obsolete
# changesets differs significantly from stripping. As part of stripping, we
# also remove the obsolescence markers associated with the stripped
# changesets, revealing the predecessor changesets when applicable. When
# archiving, we don't touch the obsolescence markers, keeping everything
# hidden. This can result in quite confusing situations for people who combine
# exchanging drafts with the archived phase, as some markers needed by others
# may be skipped during exchange.
[[items]]
section = "format"
name = "exp-archived-phase"
default = false
experimental = true

# Experimental TODOs:
#
# * Same as for revlogv2 (but for the reduction of the number of files)
# * Actually computing the rank of changesets
# * Improvement to investigate
#   - storing .hgtags fnode
#   - storing branch related identifier
[[items]]
section = "format"
name = "exp-use-changelog-v2"
experimental = true

[[items]]
section = "format"
name = "exp-use-copies-side-data-changeset"
default = false
experimental = true

[[items]]
section = "format"
name = "generaldelta"
default = false
experimental = true

[[items]]
section = "format"
name = "manifestcachesize"
experimental = true

[[items]]
section = "format"
name = "maxchainlen"
default-type = "dynamic"
experimental = true

[[items]]
section = "format"
name = "obsstore-version"

[[items]]
section = "format"
name = "revlog-compression"
default-type = "lambda"
alias = [["experimental", "format.compression"]]
default = [ "zstd", "zlib",]

[[items]]
section = "format"
name = "sparse-revlog"
default = true

[[items]]
section = "format"
name = "use-dirstate-tracked-hint"
default = false
experimental = true

[[items]]
section = "format"
name = "use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories"
default = false
experimental = true

[[items]]
section = "format"
name = "use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet"
default = false
experimental = true

[[items]]
section = "format"
name = "use-dirstate-tracked-hint.version"
default = 1
experimental = true

[[items]]
section = "format"
name = "use-dirstate-v2"
default = false
alias = [["format", "exp-rc-dirstate-v2"]]
experimental = true
documentation = """Enables dirstate-v2 format *when creating a new repository*.
Which format to use for existing repos is controlled by `.hg/requires`."""

[[items]]
section = "format"
name = "use-dirstate-v2.automatic-upgrade-of-mismatching-repositories"
default = false
experimental = true

[[items]]
section = "format"
name = "use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet"
default = false
experimental = true

# Having this on by default means we are confident about the scaling of phases.
# This is not guaranteed to be the case at the time this message is written.
[[items]]
section = "format"
name = "use-internal-phase"
default = false
experimental = true

[[items]]
section = "format"
name = "use-persistent-nodemap"
default-type = "dynamic"

[[items]]
section = "format"
name = "use-share-safe"
default = true

[[items]]
section = "format"
name = "use-share-safe.automatic-upgrade-of-mismatching-repositories"
default = false
experimental = true

[[items]]
section = "format"
name = "use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet"
default = false
experimental = true

[[items]]
section = "format"
name = "usefncache"
default = true

[[items]]
section = "format"
name = "usegeneraldelta"
default = true

[[items]]
section = "format"
name = "usestore"
default = true

[[items]]
section = "fsmonitor"
name = "warn_update_file_count"
default = 50000

[[items]]
section = "fsmonitor"
name = "warn_update_file_count_rust"
default = 400000

[[items]]
section = "fsmonitor"
name = "warn_when_unused"
default = true

[[items]]
section = "help"
name = 'hidden-command\..*'
default = false
generic = true

[[items]]
section = "help"
name = 'hidden-topic\..*'
default = false
generic = true

[[items]]
section = "hgweb-paths"
name = ".*"
default-type = "list_type"
generic = true

[[items]]
section = "hooks"
name = ".*:run-with-plain"
default = true
generic = true

[[items]]
section = "hooks"
name = "[^:]*"
default-type = "dynamic"
generic = true

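# Example hgrc for the `hooks` patterns above; the `:run-with-plain` suffix
# controls whether HGPLAIN is set while the hook runs (hook name and script
# are placeholders):
#
#   [hooks]
#   pretxncommit.lint = ./check-style.sh
#   pretxncommit.lint:run-with-plain = no
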
[[items]]
section = "hostfingerprints"
name = ".*"
default-type = "list_type"
generic = true

[[items]]
section = "hostsecurity"
name = ".*:ciphers$"
default-type = "dynamic"
generic = true

[[items]]
section = "hostsecurity"
name = ".*:fingerprints$"
default-type = "list_type"
generic = true

[[items]]
section = "hostsecurity"
name = ".*:minimumprotocol$"
default-type = "dynamic"
generic = true

[[items]]
section = "hostsecurity"
name = ".*:verifycertsfile$"
generic = true

[[items]]
section = "hostsecurity"
name = "ciphers"

[[items]]
section = "hostsecurity"
name = "minimumprotocol"
default-type = "dynamic"

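# Example hgrc for the per-host `hostsecurity` patterns above (host and
# fingerprint are placeholders):
#
#   [hostsecurity]
#   minimumprotocol = tls1.2
#   example.com:fingerprints = sha256:deadbeef...
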
[[items]]
section = "http"
name = "timeout"

[[items]]
section = "http_proxy"
name = "always"
default = false

[[items]]
section = "http_proxy"
name = "host"

[[items]]
section = "http_proxy"
name = "no"
default-type = "list_type"

[[items]]
section = "http_proxy"
name = "passwd"

[[items]]
section = "http_proxy"
name = "user"

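# Example hgrc for the `http_proxy` items above (host names are placeholders):
#
#   [http_proxy]
#   host = proxy.example.com:3128
#   no = localhost, .internal.example.com
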
[[items]]
section = "logtoprocess"
name = "command"

[[items]]
section = "logtoprocess"
name = "commandexception"

[[items]]
section = "logtoprocess"
name = "commandfinish"

[[items]]
section = "logtoprocess"
name = "develwarn"

[[items]]
section = "logtoprocess"
name = "uiblocked"

[[items]]
section = "merge"
name = "checkignored"
default = "abort"

[[items]]
section = "merge"
name = "checkunknown"
default = "abort"

[[items]]
section = "merge"
name = "disable-partial-tools"
default = false
experimental = true

[[items]]
section = "merge"
name = "followcopies"
default = true

[[items]]
section = "merge"
name = "on-failure"
default = "continue"

[[items]]
section = "merge"
name = "preferancestor"
default-type = "lambda"
default = ["*"]
experimental = true

[[items]]
section = "merge"
name = "strict-capability-check"
default = false

[[items]]
section = "merge-tools"
name = ".*"
generic = true

[[items]]
section = "merge-tools"
name = '.*\.args$'
default = "$local $base $other"
generic = true
priority = -1

[[items]]
section = "merge-tools"
name = '.*\.binary$'
default = false
generic = true
priority = -1

[[items]]
section = "merge-tools"
name = '.*\.check$'
default-type = "list_type"
generic = true
priority = -1

[[items]]
section = "merge-tools"
name = '.*\.checkchanged$'
default = false
generic = true
priority = -1

[[items]]
section = "merge-tools"
name = '.*\.executable$'
default-type = "dynamic"
generic = true
priority = -1

[[items]]
section = "merge-tools"
name = '.*\.fixeol$'
default = false
generic = true
priority = -1

[[items]]
section = "merge-tools"
name = '.*\.gui$'
default = false
generic = true
priority = -1

[[items]]
section = "merge-tools"
name = '.*\.mergemarkers$'
default = "basic"
generic = true
priority = -1

[[items]]
section = "merge-tools"
name = '.*\.mergemarkertemplate$' # take from command-templates.mergemarker
default-type = "dynamic"
generic = true
priority = -1

[[items]]
section = "merge-tools"
name = '.*\.premerge$'
default-type = "dynamic"
generic = true
priority = -1

[[items]]
section = "merge-tools"
name = '.*\.priority$'
default = 0
generic = true
priority = -1

[[items]]
section = "merge-tools"
name = '.*\.regappend$'
default = ""
generic = true
priority = -1

[[items]]
section = "merge-tools"
name = '.*\.symlink$'
default = false
generic = true
priority = -1

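# Example hgrc showing how the generic `merge-tools` suffixes above attach to
# a concrete tool name (kdiff3 is just an illustration):
#
#   [merge-tools]
#   kdiff3.args = $base $local $other -o $output
#   kdiff3.gui = True
#   kdiff3.priority = 1
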
[[items]]
section = "pager"
name = "attend-.*"
default-type = "dynamic"
generic = true

[[items]]
section = "pager"
name = "ignore"
default-type = "list_type"

[[items]]
section = "pager"
name = "pager"
default-type = "dynamic"

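# Example hgrc for the `pager` items above (pager command is illustrative):
#
#   [pager]
#   pager = less -FRX
#   ignore = version, help
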
1615 [[items]]
1615 [[items]]
1616 section = "partial-merge-tools"
1616 section = "partial-merge-tools"
1617 name = ".*"
1617 name = ".*"
1618 generic = true
1618 generic = true
1619 experimental = true
1619 experimental = true
1620
1620
1621 [[items]]
1621 [[items]]
1622 section = "partial-merge-tools"
1622 section = "partial-merge-tools"
1623 name = '.*\.args'
1623 name = '.*\.args'
1624 default = "$local $base $other"
1624 default = "$local $base $other"
1625 generic = true
1625 generic = true
1626 priority = -1
1626 priority = -1
1627 experimental = true
1627 experimental = true
1628
1628
1629 [[items]]
1629 [[items]]
1630 section = "partial-merge-tools"
1630 section = "partial-merge-tools"
1631 name = '.*\.disable'
1631 name = '.*\.disable'
1632 default = false
1632 default = false
1633 generic = true
1633 generic = true
1634 priority = -1
1634 priority = -1
1635 experimental = true
1635 experimental = true
1636
1636
1637 [[items]]
1637 [[items]]
1638 section = "partial-merge-tools"
1638 section = "partial-merge-tools"
1639 name = '.*\.executable$'
1639 name = '.*\.executable$'
1640 default-type = "dynamic"
1640 default-type = "dynamic"
1641 generic = true
1641 generic = true
1642 priority = -1
1642 priority = -1
1643 experimental = true
1643 experimental = true
1644
1644
1645 [[items]]
1645 [[items]]
1646 section = "partial-merge-tools"
1646 section = "partial-merge-tools"
1647 name = '.*\.order'
1647 name = '.*\.order'
1648 default = 0
1648 default = 0
1649 generic = true
1649 generic = true
1650 priority = -1
1650 priority = -1
1651 experimental = true
1651 experimental = true
1652
1652
1653 [[items]]
1653 [[items]]
1654 section = "partial-merge-tools"
1654 section = "partial-merge-tools"
1655 name = '.*\.patterns'
1655 name = '.*\.patterns'
1656 default-type = "dynamic"
1656 default-type = "dynamic"
1657 generic = true
1657 generic = true
1658 priority = -1
1658 priority = -1
1659 experimental = true
1659 experimental = true
1660
1660
1661 [[items]]
1661 [[items]]
1662 section = "patch"
1662 section = "patch"
1663 name = "eol"
1663 name = "eol"
1664 default = "strict"
1664 default = "strict"
1665
1665
1666 [[items]]
1666 [[items]]
1667 section = "patch"
1667 section = "patch"
1668 name = "fuzz"
1668 name = "fuzz"
1669 default = 2
1669 default = 2
1670
1670
1671 [[items]]
1671 [[items]]
1672 section = "paths"
1672 section = "paths"
1673 name = "[^:]*"
1673 name = "[^:]*"
1674 generic = true
1674 generic = true
1675
1675
1676 [[items]]
1676 [[items]]
1677 section = "paths"
1677 section = "paths"
1678 name = ".*:bookmarks.mode"
1678 name = ".*:bookmarks.mode"
1679 default = "default"
1679 default = "default"
1680 generic = true
1680 generic = true
1681
1681
1682 [[items]]
1682 [[items]]
1683 section = "paths"
1683 section = "paths"
1684 name = ".*:multi-urls"
1684 name = ".*:multi-urls"
1685 default = false
1685 default = false
1686 generic = true
1686 generic = true
1687
1687
1688 [[items]]
1688 [[items]]
1689 section = "paths"
1689 section = "paths"
1690 name = ".*:pulled-delta-reuse-policy"
1690 name = ".*:pulled-delta-reuse-policy"
1691 generic = true
1691 generic = true
1692
1692
1693 [[items]]
1693 [[items]]
1694 section = "paths"
1694 section = "paths"
1695 name = ".*:pushrev"
1695 name = ".*:pushrev"
1696 generic = true
1696 generic = true
1697
1697
1698 [[items]]
1698 [[items]]
1699 section = "paths"
1699 section = "paths"
1700 name = ".*:pushurl"
1700 name = ".*:pushurl"
1701 generic = true
1701 generic = true
1702
1702
1703 [[items]]
1703 [[items]]
1704 section = "paths"
1704 section = "paths"
1705 name = "default"
1705 name = "default"
1706
1706
1707 [[items]]
1707 [[items]]
1708 section = "paths"
1708 section = "paths"
1709 name = "default-push"
1709 name = "default-push"
1710
1710
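# Illustrative note (editorial, not part of the original file): names
# containing a colon declare per-path sub-options. The ".*:pushurl" entry
# above, for instance, matches a user configuration such as:
#
#   [paths]
#   default = https://example.com/repo
#   default:pushurl = ssh://example.com/repo
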
[[items]]
section = "phases"
name = "checksubrepos"
default = "follow"

[[items]]
section = "phases"
name = "new-commit"
default = "draft"

[[items]]
section = "phases"
name = "publish"
default = true

[[items]]
section = "profiling"
name = "enabled"
default = false

[[items]]
section = "profiling"
name = "format"
default = "text"

[[items]]
section = "profiling"
name = "freq"
default = 1000

[[items]]
section = "profiling"
name = "limit"
default = 30

[[items]]
section = "profiling"
name = "nested"
default = 0

[[items]]
section = "profiling"
name = "output"

[[items]]
section = "profiling"
name = "showmax"
default = 0.999

[[items]]
section = "profiling"
name = "showmin"
default-type = "dynamic"

[[items]]
section = "profiling"
name = "showtime"
default = true

[[items]]
section = "profiling"
name = "sort"
default = "inlinetime"

[[items]]
section = "profiling"
name = "statformat"
default = "hotpath"

[[items]]
section = "profiling"
name = "time-track"
default-type = "dynamic"

[[items]]
section = "profiling"
name = "type"
default = "stat"

[[items]]
section = "progress"
name = "assume-tty"
default = false

[[items]]
section = "progress"
name = "changedelay"
default = 1

[[items]]
section = "progress"
name = "clear-complete"
default = true

[[items]]
section = "progress"
name = "debug"
default = false

[[items]]
section = "progress"
name = "delay"
default = 3

[[items]]
section = "progress"
name = "disable"
default = false

[[items]]
section = "progress"
name = "estimateinterval"
default = 60.0

[[items]]
section = "progress"
name = "format"
default-type = "lambda"
default = [ "topic", "bar", "number", "estimate",]

[[items]]
section = "progress"
name = "refresh"
default = 0.1

[[items]]
section = "progress"
name = "width"
default-type = "dynamic"

[[items]]
section = "pull"
name = "confirm"
default = false

[[items]]
section = "push"
name = "pushvars.server"
default = false

[[items]]
section = "rebase"
name = "experimental.inmemory"
default = false

[[items]]
section = "rebase"
name = "singletransaction"
default = false

[[items]]
section = "rebase"
name = "store-source"
default = true
experimental = true
documentation = """Controls creation of a `rebase_source` extra field during rebase.
When false, no such field is created. This is useful e.g. for incrementally \
converting changesets and then rebasing them onto an existing repo.
WARNING: this is an advanced setting reserved for people who know \
exactly what they are doing. Misuse of this setting can easily \
result in obsmarker cycles and a vivid headache."""

[[items]]
section = "rewrite"
name = "backup-bundle"
default = true
alias = [["ui", "history-editing-backup"]]

[[items]]
section = "rewrite"
name = "empty-successor"
default = "skip"
experimental = true

[[items]]
section = "rewrite"
name = "update-timestamp"
default = false

[[items]]
section = "rhg"
name = "cat"
default = true
experimental = true
documentation = """rhg cat has some quirks that need to be ironed out. \
In particular, the `-r` argument accepts a partial hash, but does not \
correctly resolve `abcdef` as a potential bookmark, tag or branch name."""

[[items]]
section = "rhg"
name = "fallback-executable"
experimental = true

[[items]]
section = "rhg"
name = "fallback-immediately"
default = false
experimental = true

[[items]]
section = "rhg"
name = "ignored-extensions"
default-type = "list_type"
experimental = true

[[items]]
section = "rhg"
name = "on-unsupported"
default = "abort"
experimental = true

[[items]]
section = "server"
name = "bookmarks-pushkey-compat"
default = true

[[items]]
section = "server"
name = "bundle1"
default = true

[[items]]
section = "server"
name = "bundle1.pull"

[[items]]
section = "server"
name = "bundle1.push"

[[items]]
section = "server"
name = "bundle1gd"

[[items]]
section = "server"
name = "bundle1gd.pull"

[[items]]
section = "server"
name = "bundle1gd.push"

[[items]]
section = "server"
name = "bundle2.stream"
default = true
alias = [["experimental", "bundle2.stream"]]

[[items]]
section = "server"
name = "compressionengines"
default-type = "list_type"

[[items]]
section = "server"
name = "concurrent-push-mode"
default = "check-related"

[[items]]
section = "server"
name = "disablefullbundle"
default = false

[[items]]
section = "server"
name = "maxhttpheaderlen"
default = 1024

[[items]]
section = "server"
name = "preferuncompressed"
default = false

[[items]]
section = "server"
name = "pullbundle"
default = true

[[items]]
section = "server"
name = "streamunbundle"
default = false

[[items]]
section = "server"
name = "uncompressed"
default = true

[[items]]
section = "server"
name = "uncompressedallowsecret"
default = false

[[items]]
section = "server"
name = "validate"
default = false

[[items]]
section = "server"
name = "view"
default = "served"

[[items]]
section = "server"
name = "zliblevel"
default = -1

[[items]]
section = "server"
name = "zstdlevel"
default = 3

[[items]]
section = "share"
name = "pool"

[[items]]
section = "share"
name = "poolnaming"
default = "identity"

[[items]]
section = "share"
name = "safe-mismatch.source-not-safe"
default = "abort"

[[items]]
section = "share"
name = "safe-mismatch.source-not-safe.warn"
default = true

[[items]]
section = "share"
name = "safe-mismatch.source-not-safe:verbose-upgrade"
default = true

[[items]]
section = "share"
name = "safe-mismatch.source-safe"
default = "abort"

[[items]]
section = "share"
name = "safe-mismatch.source-safe.warn"
default = true

[[items]]
section = "share"
name = "safe-mismatch.source-safe:verbose-upgrade"
default = true

[[items]]
section = "shelve"
name = "maxbackups"
default = 10

[[items]]
section = "shelve"
name = "store"
default = "internal"
experimental = true

[[items]]
section = "smtp"
name = "host"

[[items]]
section = "smtp"
name = "local_hostname"

[[items]]
section = "smtp"
name = "password"

[[items]]
section = "smtp"
name = "port"
default-type = "dynamic"

[[items]]
section = "smtp"
name = "tls"
default = "none"

[[items]]
section = "smtp"
name = "username"

[[items]]
section = "sparse"
name = "missingwarning"
default = true
experimental = true

[[items]]
section = "storage"
name = "dirstate-v2.slow-path"
default = "abort"
experimental = true # experimental as long as format.use-dirstate-v2 is.

[[items]]
section = "storage"
name = "new-repo-backend"
default = "revlogv1"
experimental = true

[[items]]
section = "storage"
name = "revlog.delta-parent-search.candidate-group-chunk-size"
default = 20

[[items]]
section = "storage"
name = "revlog.issue6528.fix-incoming"
default = true

[[items]]
section = "storage"
name = "revlog.optimize-delta-parent-choice"
default = true
alias = [["format", "aggressivemergedeltas"]]

[[items]]
section = "storage"
name = "revlog.persistent-nodemap.mmap"
default = true

[[items]]
section = "storage"
name = "revlog.persistent-nodemap.slow-path"
default = "abort"

[[items]]
section = "storage"
name = "revlog.reuse-external-delta"
default = true

[[items]]
section = "storage"
name = "revlog.reuse-external-delta-parent"
documentation = """This option is true unless `format.generaldelta` is set."""

[[items]]
section = "storage"
name = "revlog.zlib.level"

[[items]]
section = "storage"
name = "revlog.zstd.level"

[[items]]
section = "subrepos"
name = "allowed"
default-type = "dynamic" # to make backporting simpler

[[items]]
section = "subrepos"
name = "git:allowed"
default-type = "dynamic"

[[items]]
section = "subrepos"
name = "hg:allowed"
default-type = "dynamic"

[[items]]
section = "subrepos"
name = "svn:allowed"
default-type = "dynamic"

[[items]]
section = "templateconfig"
name = ".*"
default-type = "dynamic"
generic = true

[[items]]
section = "templates"
name = ".*"
generic = true

[[items]]
section = "trusted"
name = "groups"
default-type = "list_type"

[[items]]
section = "trusted"
name = "users"
default-type = "list_type"

[[items]]
section = "ui"
name = "_usedassubrepo"
default = false

[[items]]
section = "ui"
name = "allowemptycommit"
default = false

[[items]]
section = "ui"
name = "archivemeta"
default = true

[[items]]
section = "ui"
name = "askusername"
default = false

[[items]]
section = "ui"
name = "available-memory"

[[items]]
section = "ui"
name = "clonebundlefallback"
default = false

[[items]]
section = "ui"
name = "clonebundleprefers"
default-type = "list_type"

[[items]]
section = "ui"
name = "clonebundles"
default = true

[[items]]
section = "ui"
name = "color"
default = "auto"

[[items]]
section = "ui"
name = "commitsubrepos"
default = false

[[items]]
section = "ui"
name = "debug"
default = false

[[items]]
section = "ui"
name = "debugger"

[[items]]
section = "ui"
name = "detailed-exit-code"
default = false
experimental = true

[[items]]
section = "ui"
name = "editor"
default-type = "dynamic"

[[items]]
section = "ui"
name = "fallbackencoding"

[[items]]
section = "ui"
name = "forcecwd"

[[items]]
section = "ui"
name = "forcemerge"

[[items]]
section = "ui"
name = "formatdebug"
default = false

[[items]]
section = "ui"
name = "formatjson"
default = false

[[items]]
section = "ui"
name = "formatted"

[[items]]
section = "ui"
name = "interactive"

[[items]]
section = "ui"
name = "interface"

[[items]]
section = "ui"
name = "interface.chunkselector"

[[items]]
section = "ui"
name = "large-file-limit"
default = 10485760

[[items]]
section = "ui"
name = "logblockedtimes"
default = false

[[items]]
section = "ui"
name = "merge"

[[items]]
section = "ui"
name = "mergemarkers"
default = "basic"

[[items]]
section = "ui"
name = "message-output"
default = "stdio"

[[items]]
section = "ui"
name = "nontty"
default = false

[[items]]
section = "ui"
name = "origbackuppath"

[[items]]
section = "ui"
name = "paginate"
default = true

[[items]]
section = "ui"
name = "patch"

[[items]]
section = "ui"
name = "portablefilenames"
default = "warn"

[[items]]
section = "ui"
name = "promptecho"
default = false

[[items]]
section = "ui"
name = "quiet"
default = false

[[items]]
section = "ui"
name = "quietbookmarkmove"
default = false

[[items]]
section = "ui"
name = "relative-paths"
default = "legacy"

[[items]]
section = "ui"
name = "remotecmd"
default = "hg"

[[items]]
section = "ui"
name = "report_untrusted"
default = true

[[items]]
section = "ui"
name = "rollback"
default = true

[[items]]
section = "ui"
name = "signal-safe-lock"
default = true

[[items]]
section = "ui"
name = "slash"
default = false

[[items]]
section = "ui"
name = "ssh"
default = "ssh"

[[items]]
section = "ui"
name = "ssherrorhint"

[[items]]
section = "ui"
name = "statuscopies"
default = false

[[items]]
section = "ui"
name = "strict"
default = false

[[items]]
section = "ui"
name = "style"
default = ""

[[items]]
section = "ui"
name = "supportcontact"

[[items]]
section = "ui"
name = "textwidth"
default = 78

[[items]]
section = "ui"
name = "timeout"
default = "600"

[[items]]
section = "ui"
name = "timeout.warn"
default = 0

[[items]]
section = "ui"
name = "timestamp-output"
default = false

[[items]]
section = "ui"
name = "traceback"
default = false

[[items]]
section = "ui"
name = "tweakdefaults"
default = false

[[items]]
section = "ui"
name = "username"
alias = [["ui", "user"]]

[[items]]
section = "ui"
name = "verbose"
default = false

[[items]]
section = "verify"
name = "skipflags"
default = 0

[[items]]
section = "web"
name = "accesslog"
default = "-"

[[items]]
section = "web"
name = "address"
default = ""

[[items]]
section = "web"
name = "allow-archive"
default-type = "list_type"
alias = [["web", "allow_archive"]]

[[items]]
section = "web"
name = "allow-pull"
default = true
alias = [["web", "allowpull"]]

[[items]]
section = "web"
name = "allow-push"
default-type = "list_type"
alias = [["web", "allow_push"]]

[[items]]
section = "web"
name = "allow_read"
default-type = "list_type"

[[items]]
section = "web"
name = "allowbz2"
default = false

[[items]]
section = "web"
name = "allowgz"
default = false

[[items]]
section = "web"
name = "allowzip"
default = false

[[items]]
section = "web"
name = "archivesubrepos"
default = false

[[items]]
section = "web"
name = "baseurl"

[[items]]
section = "web"
name = "cacerts"

[[items]]
section = "web"
name = "cache"
default = true

[[items]]
section = "web"
name = "certificate"

[[items]]
section = "web"
name = "collapse"
default = false

[[items]]
section = "web"
name = "comparisoncontext"
default = 5

[[items]]
section = "web"
name = "contact"

[[items]]
section = "web"
name = "csp"

[[items]]
section = "web"
name = "deny_push"
default-type = "list_type"

[[items]]
section = "web"
name = "deny_read"
default-type = "list_type"

[[items]]
section = "web"
name = "descend"
default = true

[[items]]
section = "web"
name = "description"
default = ""

[[items]]
section = "web"
name = "encoding"
default-type = "lazy_module"
default = "encoding.encoding"

[[items]]
section = "web"
name = "errorlog"
default = "-"

[[items]]
section = "web"
name = "guessmime"
default = false

[[items]]
section = "web"
name = "hidden"
default = false

[[items]]
section = "web"
name = "ipv6"
default = false

[[items]]
section = "web"
name = "labels"
default-type = "list_type"

[[items]]
section = "web"
name = "logoimg"
default = "hglogo.png"

[[items]]
section = "web"
name = "logourl"
default = "https://mercurial-scm.org/"

[[items]]
section = "web"
name = "maxchanges"
default = 10

[[items]]
section = "web"
name = "maxfiles"
default = 10

[[items]]
section = "web"
name = "maxshortchanges"
default = 60

[[items]]
section = "web"
name = "motd"
default = ""

[[items]]
section = "web"
name = "name"
default-type = "dynamic"

[[items]]
section = "web"
name = "port"
default = 8000

[[items]]
section = "web"
name = "prefix"
default = ""

[[items]]
section = "web"
name = "push_ssl"
default = true

[[items]]
section = "web"
name = "refreshinterval"
default = 20

[[items]]
section = "web"
name = "server-header"

[[items]]
section = "web"
name = "static"

[[items]]
section = "web"
name = "staticurl"

[[items]]
section = "web"
name = "stripes"
default = 1

[[items]]
section = "web"
name = "style"
default = "paper"

[[items]]
section = "web"
name = "templates"

[[items]]
section = "web"
name = "view"
default = "served"
experimental = true

[[items]]
section = "worker"
name = "backgroundclose"
default-type = "dynamic"

[[items]]
section = "worker"
name = "backgroundclosemaxqueue"
# Windows defaults to a limit of 512 open files. A buffer of 128
# should give us enough headway.
default = 384

[[items]]
section = "worker"
name = "backgroundcloseminfilecount"
default = 2048

[[items]]
section = "worker"
name = "backgroundclosethreadcount"
default = 4

[[items]]
section = "worker"
name = "enabled"
default = true

[[items]]
section = "worker"
name = "numcpus"

# Templates and template applications

[[template-applications]]
template = "diff-options"
section = "annotate"

[[template-applications]]
template = "diff-options"
section = "commands"
prefix = "commit.interactive"

[[template-applications]]
template = "diff-options"
section = "commands"
prefix = "revert.interactive"

[[template-applications]]
template = "diff-options"
section = "diff"

[templates]
[[templates.diff-options]]
suffix = "nodates"
default = false

[[templates.diff-options]]
suffix = "showfunc"
default = false

[[templates.diff-options]]
suffix = "unified"

[[templates.diff-options]]
suffix = "git"
default = false

[[templates.diff-options]]
suffix = "ignorews"
default = false

[[templates.diff-options]]
suffix = "ignorewsamount"
default = false

[[templates.diff-options]]
suffix = "ignoreblanklines"
default = false

[[templates.diff-options]]
suffix = "ignorewseol"
default = false

[[templates.diff-options]]
suffix = "nobinary"
default = false

[[templates.diff-options]]
suffix = "noprefix"
default = false

[[templates.diff-options]]
suffix = "word-diff"
default = false

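# Illustrative note (editorial, not part of the original file): each
# [[template-applications]] entry above appears to instantiate every
# templates.diff-options suffix in the named section under the optional
# prefix: the application to section "diff" yields diff.nodates,
# diff.showfunc, diff.git and so on, while the one with
# prefix = "commit.interactive" yields commands.commit.interactive.nodates
# and friends.
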
# In-core extensions

[[items]]
section = "blackbox"
name = "debug.to-stderr"
default = false
in_core_extension = "blackbox"

[[items]]
section = "blackbox"
name = "dirty"
default = false
in_core_extension = "blackbox"

[[items]]
section = "blackbox"
name = "maxsize"
default = "1 MB"
in_core_extension = "blackbox"

[[items]]
section = "blackbox"
name = "logsource"
default = false
in_core_extension = "blackbox"

[[items]]
section = "blackbox"
name = "maxfiles"
default = 7
in_core_extension = "blackbox"

[[items]]
section = "blackbox"
name = "track"
default-type = "lambda"
default = ["*"]
in_core_extension = "blackbox"

[[items]]
section = "blackbox"
name = "ignore"
default-type = "lambda"
default = ["chgserver", "cmdserver", "extension"]
in_core_extension = "blackbox"

[[items]]
section = "blackbox"
name = "date-format"
default = ""
in_core_extension = "blackbox"
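
# Illustrative note (editorial, not part of the original file): once
# registered here, an item is read through the ui API with its declared
# default applied automatically, e.g.:
#
#   ui.configbool(b'blackbox', b'dirty')    # False unless overridden
#   ui.config(b'blackbox', b'date-format')  # b'' unless overridden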
@@ -1,669 +1,670 b''
# httppeer.py - HTTP repository proxy classes for mercurial
#
# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import errno
import io
import os
import socket
import struct

from concurrent import futures
from .i18n import _
from . import (
    bundle2,
    error,
    httpconnection,
    pycompat,
    statichttprepo,
    url as urlmod,
    util,
    wireprotov1peer,
)
from .utils import urlutil

httplib = util.httplib
urlerr = util.urlerr
urlreq = util.urlreq


def encodevalueinheaders(value, header, limit):
    """Encode a string value into multiple HTTP headers.

    ``value`` will be encoded into 1 or more HTTP headers with the names
    ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
    name + value will be at most ``limit`` bytes long.

    Returns an iterable of 2-tuples consisting of header names and
    values as native strings.
    """
    # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
    # not bytes. This function always takes bytes in as arguments.
    fmt = pycompat.strurl(header) + r'-%s'
    # Note: it is *NOT* a bug that the last bit here is a bytestring
    # and not a unicode: we're just getting the encoded length anyway,
    # and using an r-string to make it portable between Python 2 and 3
    # doesn't work because then the \r is a literal backslash-r
    # instead of a carriage return.
    valuelen = limit - len(fmt % '000') - len(b': \r\n')
    result = []

    n = 0
    for i in range(0, len(value), valuelen):
        n += 1
        result.append((fmt % str(n), pycompat.strurl(value[i : i + valuelen])))

    return result
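
# A worked example for encodevalueinheaders (editorial, not part of the
# original file). With header=b'X-HgArg' and limit=30:
#   fmt         == 'X-HgArg-%s'
#   fmt % '000' == 'X-HgArg-000'                       (11 characters)
#   valuelen    == 30 - 11 - len(b': \r\n') == 30 - 11 - 4 == 15
# so a 40-byte value is split into chunks of 15, 15 and 10 bytes, emitted
# as ('X-HgArg-1', ...), ('X-HgArg-2', ...), ('X-HgArg-3', ...).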


class _multifile:
    def __init__(self, *fileobjs):
        for f in fileobjs:
            if not hasattr(f, 'length'):
                raise ValueError(
                    b'_multifile only supports file objects that '
                    b'have a length but this one does not:',
                    type(f),
                    f,
                )
        self._fileobjs = fileobjs
        self._index = 0

    @property
    def length(self):
        return sum(f.length for f in self._fileobjs)

    def read(self, amt=None):
        # amt=None means "read everything"; the explicit None check matters
        # because 'None <= 0' raises TypeError on Python 3.
        if amt is None or amt <= 0:
            return b''.join(f.read() for f in self._fileobjs)
        parts = []
        while amt and self._index < len(self._fileobjs):
            parts.append(self._fileobjs[self._index].read(amt))
            got = len(parts[-1])
            if got < amt:
                self._index += 1
            amt -= got
        return b''.join(parts)

    def seek(self, offset, whence=os.SEEK_SET):
        if whence != os.SEEK_SET:
            raise NotImplementedError(
                b'_multifile does not support anything other'
                b' than os.SEEK_SET for whence on seek()'
            )
        if offset != 0:
            raise NotImplementedError(
                b'_multifile only supports seeking to start, but that '
                b'could be fixed if you need it'
            )
        for f in self._fileobjs:
            f.seek(0)
        self._index = 0
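
# A minimal usage sketch for _multifile (editorial, not part of the
# original file); plain attribute assignment gives io.BytesIO objects the
# required 'length', just as makev1commandrequest does below:
#
#   a = io.BytesIO(b'cmd=push&')
#   a.length = 9
#   b = io.BytesIO(b'<bundle payload>')
#   b.length = 16
#   body = _multifile(a, b)   # body.length == 25
#   body.read(12)             # -> b'cmd=push&<bu'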


def makev1commandrequest(
    ui,
    requestbuilder,
    caps,
    capablefn,
    repobaseurl,
    cmd,
    args,
    remotehidden=False,
):
    """Make an HTTP request to run a command for a version 1 client.

    ``caps`` is a set of known server capabilities. The value may be
    None if capabilities are not yet known.

    ``capablefn`` is a function to evaluate a capability.

    ``cmd``, ``args``, and ``data`` define the command, its arguments, and
    raw data to pass to it.
    """
    if cmd == b'pushkey':
        args[b'data'] = b''
    data = args.pop(b'data', None)
    headers = args.pop(b'headers', {})

    ui.debug(b"sending %s command\n" % cmd)
    q = [(b'cmd', cmd)]
    if remotehidden:
        q.append(('access-hidden', '1'))
    headersize = 0
    # Important: don't use self.capable() here or else you end up
    # with infinite recursion when trying to look up capabilities
    # for the first time.
    postargsok = caps is not None and b'httppostargs' in caps

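    # Editorial summary (not part of the original file) of the transports
    # below: a POST body is used when the server advertises 'httppostargs';
    # otherwise arguments travel in X-HgArg-<N> headers sized to the
    # server's advertised header limit; failing that, they fall back to the
    # query string (servers older than Mercurial 1.9).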
    # Send arguments via POST.
    if postargsok and args:
        strargs = urlreq.urlencode(sorted(args.items()))
        if not data:
            data = strargs
        else:
            if isinstance(data, bytes):
                i = io.BytesIO(data)
                i.length = len(data)
                data = i
            argsio = io.BytesIO(strargs)
            argsio.length = len(strargs)
            data = _multifile(argsio, data)
        headers['X-HgArgs-Post'] = len(strargs)
    elif args:
        # Calling self.capable() can infinite loop if we are calling
        # "capabilities". But that command should never accept wire
        # protocol arguments. So this should never happen.
        assert cmd != b'capabilities'
        httpheader = capablefn(b'httpheader')
        if httpheader:
            headersize = int(httpheader.split(b',', 1)[0])

    # Send arguments via HTTP headers.
    if headersize > 0:
        # The headers can typically carry more data than the URL.
        encoded_args = urlreq.urlencode(sorted(args.items()))
        for header, value in encodevalueinheaders(
            encoded_args, b'X-HgArg', headersize
        ):
            headers[header] = value
    # Send arguments via query string (Mercurial <1.9).
    else:
        q += sorted(args.items())

    qs = b'?%s' % urlreq.urlencode(q)
    cu = b"%s%s" % (repobaseurl, qs)
    size = 0
    if hasattr(data, 'length'):
        size = data.length
    elif data is not None:
        size = len(data)
    if data is not None and 'Content-Type' not in headers:
        headers['Content-Type'] = 'application/mercurial-0.1'

    # Tell the server we accept application/mercurial-0.2 and multiple
    # compression formats if the server is capable of emitting those
    # payloads.
    # Note: Keep this set empty by default, as client advertisement of
    # protocol parameters should only occur after the handshake.
    protoparams = set()

    mediatypes = set()
    if caps is not None:
        mt = capablefn(b'httpmediatype')
        if mt:
            protoparams.add(b'0.1')
            mediatypes = set(mt.split(b','))

    protoparams.add(b'partial-pull')

    if b'0.2tx' in mediatypes:
        protoparams.add(b'0.2')

    if b'0.2tx' in mediatypes and capablefn(b'compression'):
        # We /could/ compare supported compression formats and prune
        # non-mutually supported or error if nothing is mutually supported.
        # For now, send the full list to the server and have it error.
        comps = [
            e.wireprotosupport().name
            for e in util.compengines.supportedwireengines(util.CLIENTROLE)
        ]
        protoparams.add(b'comp=%s' % b','.join(comps))

    if protoparams:
        protoheaders = encodevalueinheaders(
            b' '.join(sorted(protoparams)), b'X-HgProto', headersize or 1024
        )
        for header, value in protoheaders:
            headers[header] = value

    varyheaders = []
    for header in headers:
        if header.lower().startswith('x-hg'):
227 if header.lower().startswith('x-hg'):
228 varyheaders.append(header)
228 varyheaders.append(header)
229
229
230 if varyheaders:
230 if varyheaders:
231 headers['Vary'] = ','.join(sorted(varyheaders))
231 headers['Vary'] = ','.join(sorted(varyheaders))
232
232
233 req = requestbuilder(pycompat.strurl(cu), data, headers)
233 req = requestbuilder(pycompat.strurl(cu), data, headers)
234
234
235 if data is not None:
235 if data is not None:
236 ui.debug(b"sending %d bytes\n" % size)
236 ui.debug(b"sending %d bytes\n" % size)
237 req.add_unredirected_header('Content-Length', '%d' % size)
237 req.add_unredirected_header('Content-Length', '%d' % size)
238
238
239 return req, cu, qs
239 return req, cu, qs
240
240
241
241
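# Sketch of the header encoding performed above: with a server-advertised
# ``httpheader`` capability of, say, 1024, the urlencoded argument string is
# chunked by encodevalueinheaders() into numbered headers no larger than that
# limit, e.g.:
#
#   X-HgArg-1: namespace=phases
#
# with longer argument strings spilling into X-HgArg-2, X-HgArg-3, and so on.
# The exact chunk boundaries depend on the advertised size; this is a sketch
# of the shape of the encoding, not captured wire traffic.

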
def sendrequest(ui, opener, req):
    """Send a prepared HTTP request.

    Returns the response object.
    """
    dbg = ui.debug
    if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
        line = b'devel-peer-request: %s\n'
        dbg(
            line
            % b'%s %s'
            % (
                pycompat.bytesurl(req.get_method()),
                pycompat.bytesurl(req.get_full_url()),
            )
        )
        hgargssize = None

        for header, value in sorted(req.header_items()):
            header = pycompat.bytesurl(header)
            value = pycompat.bytesurl(value)
            if header.startswith(b'X-hgarg-'):
                if hgargssize is None:
                    hgargssize = 0
                hgargssize += len(value)
            else:
                dbg(line % b' %s %s' % (header, value))

        if hgargssize is not None:
            dbg(
                line
                % b' %d bytes of commands arguments in headers'
                % hgargssize
            )
        data = req.data
        if data is not None:
            length = getattr(data, 'length', None)
            if length is None:
                length = len(data)
            dbg(line % b' %d bytes of data' % length)

    start = util.timer()

    res = None
    try:
        res = opener.open(req)
    except urlerr.httperror as inst:
        if inst.code == 401:
            raise error.Abort(_(b'authorization failed'))
        raise
    except httplib.HTTPException as inst:
        ui.debug(
            b'http error requesting %s\n'
            % urlutil.hidepassword(req.get_full_url())
        )
        ui.traceback()
        raise IOError(None, inst)
    finally:
        if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
            code = res.code if res else -1
            dbg(
                line
                % b' finished in %.4f seconds (%d)'
                % (util.timer() - start, code)
            )

    # Insert error handlers for common I/O failures.
    urlmod.wrapresponse(res)

    return res

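# The ``devel-peer-request`` lines emitted above are gated on both --debug
# (ui.debugflag) and a developer configuration knob, e.g. in an hgrc:
#
#   [devel]
#   debug.peer-request = yes

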
class RedirectedRepoError(error.RepoError):
    def __init__(self, msg, respurl):
        super(RedirectedRepoError, self).__init__(msg)
        self.respurl = respurl


def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible):
    # record the url we got redirected to
    redirected = False
    respurl = pycompat.bytesurl(resp.geturl())
    if respurl.endswith(qs):
        respurl = respurl[: -len(qs)]
        qsdropped = False
    else:
        qsdropped = True

    if baseurl.rstrip(b'/') != respurl.rstrip(b'/'):
        redirected = True
        if not ui.quiet:
            ui.warn(_(b'real URL is %s\n') % respurl)

    try:
        proto = pycompat.bytesurl(resp.getheader('content-type', ''))
    except AttributeError:
        proto = pycompat.bytesurl(resp.headers.get('content-type', ''))

    safeurl = urlutil.hidepassword(baseurl)
    if proto.startswith(b'application/hg-error'):
        raise error.OutOfBandError(resp.read())

    # Pre 1.0 versions of Mercurial used text/plain and
    # application/hg-changegroup. We don't support such old servers.
    if not proto.startswith(b'application/mercurial-'):
        ui.debug(b"requested URL: '%s'\n" % urlutil.hidepassword(requrl))
        msg = _(
            b"'%s' does not appear to be an hg repository:\n"
            b"---%%<--- (%s)\n%s\n---%%<---\n"
        ) % (safeurl, proto or b'no content-type', resp.read(1024))

        # Some servers may strip the query string from the redirect. We
        # raise a special error type so callers can react to this specially.
        if redirected and qsdropped:
            raise RedirectedRepoError(msg, respurl)
        else:
            raise error.RepoError(msg)

    try:
        subtype = proto.split(b'-', 1)[1]

        version_info = tuple([int(n) for n in subtype.split(b'.')])
    except ValueError:
        raise error.RepoError(
            _(b"'%s' sent a broken Content-Type header (%s)") % (safeurl, proto)
        )

    # TODO consider switching to a decompression reader that uses
    # generators.
    if version_info == (0, 1):
        if compressible:
            resp = util.compengines[b'zlib'].decompressorreader(resp)

    elif version_info == (0, 2):
        # application/mercurial-0.2 always identifies the compression
        # engine in the payload header.
        elen = struct.unpack(b'B', util.readexactly(resp, 1))[0]
        ename = util.readexactly(resp, elen)
        engine = util.compengines.forwiretype(ename)

        resp = engine.decompressorreader(resp)
    else:
        raise error.RepoError(
            _(b"'%s' uses newer protocol %s") % (safeurl, subtype)
        )

    return respurl, proto, resp

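# Sketch of the application/mercurial-0.2 framing consumed above: the body
# starts with a one-byte length, then the compression engine name, then the
# compressed payload. A zlib-compressed response would therefore begin with:
#
#   b'\x04zlib' + <compressed payload>
#
# where b'\x04' is struct.pack(b'B', len(b'zlib')); engine names are the
# values reported by wireprotosupport().name.

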
class httppeer(wireprotov1peer.wirepeer):
    def __init__(
        self, ui, path, url, opener, requestbuilder, caps, remotehidden=False
    ):
        super().__init__(ui, path=path, remotehidden=remotehidden)
        self._url = url
        self._caps = caps
        self.limitedarguments = caps is not None and b'httppostargs' not in caps
        self._urlopener = opener
        self._requestbuilder = requestbuilder
        self._remotehidden = remotehidden

    def __del__(self):
        for h in self._urlopener.handlers:
            h.close()
            getattr(h, "close_all", lambda: None)()

    # Begin of ipeerconnection interface.

    def url(self):
        return self.path.loc

    def local(self):
        return None

    def canpush(self):
        return True

    def close(self):
        try:
            reqs, sent, recv = (
                self._urlopener.requestscount,
                self._urlopener.sentbytescount,
                self._urlopener.receivedbytescount,
            )
        except AttributeError:
            return
        self.ui.note(
            _(
                b'(sent %d HTTP requests and %d bytes; '
                b'received %d bytes in responses)\n'
            )
            % (reqs, sent, recv)
        )

    # End of ipeerconnection interface.

    # Begin of ipeercommands interface.

    def capabilities(self):
        return self._caps

    def _finish_inline_clone_bundle(self, stream):
        # HTTP streams must hit the end to process the last empty
        # chunk of Chunked-Encoding so the connection can be reused.
        chunk = stream.read(1)
        if chunk:
            self._abort(error.ResponseError(_(b"unexpected response:"), chunk))

    # End of ipeercommands interface.

    def _callstream(self, cmd, _compressible=False, **args):
        args = pycompat.byteskwargs(args)

        req, cu, qs = makev1commandrequest(
            self.ui,
            self._requestbuilder,
            self._caps,
            self.capable,
            self._url,
            cmd,
            args,
            self._remotehidden,
        )

        resp = sendrequest(self.ui, self._urlopener, req)

        self._url, ct, resp = parsev1commandresponse(
            self.ui, self._url, cu, qs, resp, _compressible
        )

        return resp

    def _call(self, cmd, **args):
        fp = self._callstream(cmd, **args)
        try:
            return fp.read()
        finally:
            # if using keepalive, allow connection to be reused
            fp.close()

    def _callpush(self, cmd, cg, **args):
        # have to stream bundle to a temp file because we do not have
        # http 1.1 chunked transfer.

        types = self.capable(b'unbundle')
        try:
            types = types.split(b',')
        except AttributeError:
            # servers older than d1b16a746db6 will send 'unbundle' as a
            # boolean capability. They only support headerless/uncompressed
            # bundles.
            types = [b""]
        for x in types:
            if x in bundle2.bundletypes:
                type = x
                break

        tempname = bundle2.writebundle(self.ui, cg, None, type)
        fp = httpconnection.httpsendfile(self.ui, tempname, b"rb")
        headers = {'Content-Type': 'application/mercurial-0.1'}

        try:
            r = self._call(cmd, data=fp, headers=headers, **args)
            vals = r.split(b'\n', 1)
            if len(vals) < 2:
                raise error.ResponseError(_(b"unexpected response:"), r)
            return vals
        except urlerr.httperror:
            # Catch and re-raise these so we don't try to treat them
            # like generic socket errors. They lack any values in
            # .args on Python 3 which breaks our socket.error block.
            raise
        except socket.error as err:
            if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
                raise error.Abort(_(b'push failed: %s') % err.args[1])
            raise error.Abort(err.args[1])
        finally:
            fp.close()
            os.unlink(tempname)

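    # The two newline-separated values returned above follow the v1
    # 'unbundle' convention: the first line is the result code and the
    # remainder is server output, so a successful push typically comes back
    # roughly as (b'1', b'<remote output>'); a sketch of the shape, not
    # captured traffic.
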
    def _calltwowaystream(self, cmd, fp, **args):
        filename = None
        try:
            # dump bundle to disk
            fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
            with os.fdopen(fd, "wb") as fh:
                d = fp.read(4096)
                while d:
                    fh.write(d)
                    d = fp.read(4096)
            # start http push
            with httpconnection.httpsendfile(self.ui, filename, b"rb") as fp_:
                headers = {'Content-Type': 'application/mercurial-0.1'}
                return self._callstream(cmd, data=fp_, headers=headers, **args)
        finally:
            if filename is not None:
                os.unlink(filename)

    def _callcompressable(self, cmd, **args):
        return self._callstream(cmd, _compressible=True, **args)

    def _abort(self, exception):
        raise exception


class queuedcommandfuture(futures.Future):
    """Wraps result() on command futures to trigger submission on call."""

    def result(self, timeout=None):
        if self.done():
            return futures.Future.result(self, timeout)

        self._peerexecutor.sendcommands()

        # sendcommands() will restore the original __class__ and self.result
        # will resolve to Future.result.
        return self.result(timeout)

def performhandshake(ui, url, opener, requestbuilder):
    # The handshake is a request to the capabilities command.

    caps = None

    def capable(x):
        raise error.ProgrammingError(b'should not be called')

    args = {}

    req, requrl, qs = makev1commandrequest(
        ui, requestbuilder, caps, capable, url, b'capabilities', args
    )
    resp = sendrequest(ui, opener, req)

    # The server may redirect us to the repo root, stripping the
    # ?cmd=capabilities query string from the URL. The server would likely
    # return HTML in this case and ``parsev1commandresponse()`` would raise.
    # We catch this special case and re-issue the capabilities request against
    # the new URL.
    #
    # We should ideally not do this, as a redirect that drops the query
    # string from the URL is arguably a server bug. (Garbage in, garbage out).
    # However, Mercurial clients for several years appeared to handle this
    # issue without behavior degradation. And according to issue 5860, it may
    # be a longstanding bug in some server implementations. So we allow a
    # redirect that drops the query string to "just work."
    try:
        respurl, ct, resp = parsev1commandresponse(
            ui, url, requrl, qs, resp, compressible=False
        )
    except RedirectedRepoError as e:
        req, requrl, qs = makev1commandrequest(
            ui, requestbuilder, caps, capable, e.respurl, b'capabilities', args
        )
        resp = sendrequest(ui, opener, req)
        respurl, ct, resp = parsev1commandresponse(
            ui, url, requrl, qs, resp, compressible=False
        )

    try:
        rawdata = resp.read()
    finally:
        resp.close()

    if not ct.startswith(b'application/mercurial-'):
        raise error.ProgrammingError(b'unexpected content-type: %s' % ct)

    info = {b'v1capabilities': set(rawdata.split())}

    return respurl, info

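# For reference, the handshake above amounts to a plain GET against the
# repository URL, e.g. ``GET <repo>?cmd=capabilities``, whose body is a
# space-separated capability list along the lines of:
#
#   lookup branchmap pushkey unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024
#
# (an illustrative sample, not an exhaustive list), parsed above with
# ``rawdata.split()``.

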
def _make_peer(
    ui, path, opener=None, requestbuilder=urlreq.request, remotehidden=False
):
    """Construct an appropriate HTTP peer instance.

    ``opener`` is an ``url.opener`` that should be used to establish
    connections and perform HTTP requests.

    ``requestbuilder`` is the type used for constructing HTTP requests.
    It exists as an argument so extensions can override the default.
    """
    if path.url.query or path.url.fragment:
        msg = _(b'unsupported URL component: "%s"')
        msg %= path.url.query or path.url.fragment
        raise error.Abort(msg)

    # urllib cannot handle URLs with embedded user or passwd.
    url, authinfo = path.url.authinfo()
    ui.debug(b'using %s\n' % url)

    opener = opener or urlmod.opener(ui, authinfo)

    respurl, info = performhandshake(ui, url, opener, requestbuilder)

    return httppeer(
        ui,
        path,
        respurl,
        opener,
        requestbuilder,
        info[b'v1capabilities'],
        remotehidden=remotehidden,
    )


def make_peer(
    ui, path, create, intents=None, createopts=None, remotehidden=False
):
    if create:
        raise error.Abort(_(b'cannot create new http repository'))
    try:
        if path.url.scheme == b'https' and not urlmod.has_https:
            raise error.Abort(
                _(b'Python support for SSL and HTTPS is not installed')
            )

        inst = _make_peer(ui, path, remotehidden=remotehidden)

        return inst
    except error.RepoError as httpexception:
        try:
            path = path.copy(new_raw_location=b"static-" + path.rawloc)
            r = statichttprepo.make_peer(ui, path, create)
            ui.note(_(b'(falling back to static-http)\n'))
            return r
        except error.RepoError:
            raise httpexception  # use the original http RepoError instead
@@ -1,912 +1,932 b''
# tags.py - read tag info from local repository
#
# Copyright 2009 Olivia Mackall <olivia@selenic.com>
# Copyright 2009 Greg Ward <greg@gerg.ca>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# Currently this module only deals with reading and caching tags.
# Eventually, it could take care of updating (adding/removing/moving)
# tags too.


import binascii
import io

from .node import (
    bin,
    hex,
    nullrev,
    short,
)
from .i18n import _
from . import (
    encoding,
    error,
    match as matchmod,
    scmutil,
    util,
)
from .utils import stringutil

# Tags computation can be expensive and caches exist to make it fast in
# the common case.
#
# The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
# each revision in the repository. The file is effectively an array of
# fixed length records. Read the docs for "hgtagsfnodescache" for technical
# details.
#
# The .hgtags filenode cache grows in proportion to the length of the
# changelog. The file is truncated when the changelog is stripped.
#
# The purpose of the filenode cache is to avoid the most expensive part
# of finding global tags, which is looking up the .hgtags filenode in the
# manifest for each head. This can take dozens or over 100ms for
# repositories with very large manifests. Multiplied by dozens or even
# hundreds of heads and there is a significant performance concern.
#
# There also exists a separate cache file for each repository filter.
# These "tags-*" files store information about the history of tags.
#
# The tags cache files consist of a cache validation line followed by
# a history of tags.
#
# The cache validation line has the format:
#
#   <tiprev> <tipnode> [<filteredhash>]
#
# <tiprev> is an integer revision and <tipnode> is a 40 character hex
# node for that changeset. These redundantly identify the repository
# tip from the time the cache was written. In addition, <filteredhash>,
# if present, is a 40 character hex hash of the contents of the filtered
# revisions for this filter. If the set of filtered revs changes, the
# hash will change and invalidate the cache.
#
# The history part of the tags cache consists of lines of the form:
#
#   <node> <tag>
#
# (This format is identical to that of .hgtags files.)
#
# <tag> is the tag name and <node> is the 40 character hex changeset
# the tag is associated with.
#
# Tags are written sorted by tag name.
#
# Tags associated with multiple changesets have an entry for each changeset.
# The most recent changeset (in terms of revlog ordering for the head
# setting it) for each tag is last.

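# As a concrete sketch, a "tags2-visible" cache file holding one tag could
# look like this (placeholder hashes of the right length, no optional
# filtered hash on the validation line):
#
#   42 bbd179dfa0a71671c253b3ae0aa1513b60d199fa
#   acb14030fe0a21b60322c440ad2d20cf7685a376 1.0
#
# i.e. one validation line, then one "<node> <tag>" line per entry.

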
def fnoderevs(ui, repo, revs):
    """return the list of '.hgtags' fnodes used in a set of revisions

    This is returned as list of unique fnodes. We use a list instead of a set
    because order matters when it comes to tags."""
    unfi = repo.unfiltered()
    tonode = unfi.changelog.node
    nodes = [tonode(r) for r in revs]
    fnodes = _getfnodes(ui, repo, nodes)
    fnodes = _filterfnodes(fnodes, nodes)
    return fnodes


def _nulltonone(repo, value):
    """convert nullid to None

    For tag value, nullid means "deleted". This small utility function helps
    translating that to None."""
    if value == repo.nullid:
        return None
    return value


def difftags(ui, repo, oldfnodes, newfnodes):
    """list differences between tags expressed in two sets of file-nodes

    The list contains entries in the form: (tagname, oldvalue, new value).
    None is used to express a missing value:
        ('foo', None, 'abcd') is a new tag,
        ('bar', 'ef01', None) is a deletion,
        ('baz', 'abcd', 'ef01') is a tag movement.
    """
    if oldfnodes == newfnodes:
        return []
    oldtags = _tagsfromfnodes(ui, repo, oldfnodes)
    newtags = _tagsfromfnodes(ui, repo, newfnodes)

    # list of (tag, old, new): None means missing
    entries = []
    for tag, (new, __) in newtags.items():
        new = _nulltonone(repo, new)
        old, __ = oldtags.pop(tag, (None, None))
        old = _nulltonone(repo, old)
        if old != new:
            entries.append((tag, old, new))
    # handle deleted tags
    for tag, (old, __) in oldtags.items():
        old = _nulltonone(repo, old)
        if old is not None:
            entries.append((tag, old, None))
    entries.sort()
    return entries


def writediff(fp, difflist):
    """write tags diff information to a file.

    Data are stored with a line based format:

        <action> <hex-node> <tag-name>\n

    Actions are defined as follows:
        -R tag is removed,
        +A tag is added,
        -M tag is moved (old value),
        +M tag is moved (new value),

    Example:

         +A 875517b4806a848f942811a315a5bce30804ae85 t5

    See documentation of difftags output for details about the input.
    """
    add = b'+A %s %s\n'
    remove = b'-R %s %s\n'
    updateold = b'-M %s %s\n'
    updatenew = b'+M %s %s\n'
    for tag, old, new in difflist:
        # translate to hex
        if old is not None:
            old = hex(old)
        if new is not None:
            new = hex(new)
        # write to file
        if old is None:
            fp.write(add % (new, tag))
        elif new is None:
            fp.write(remove % (old, tag))
        else:
            fp.write(updateold % (old, tag))
            fp.write(updatenew % (new, tag))

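# For instance, a moved tag ('t5', oldnode, newnode) is written by
# writediff() as a -M/+M pair (placeholder hashes):
#
#   -M 1f0dee641bb7258c56bd60e93edfa2405381c41e t5
#   +M 875517b4806a848f942811a315a5bce30804ae85 t5

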
def findglobaltags(ui, repo):
    """Find global tags in a repo: return a tagsmap

    tagsmap: tag name to (node, hist) 2-tuples.

    The tags cache is read and updated as a side-effect of calling.
    """
    (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
    if cachetags is not None:
        assert not shouldwrite
        # XXX is this really 100% correct? are there oddball special
        # cases where a global tag should outrank a local tag but won't,
        # because cachetags does not contain rank info?
        alltags = {}
        _updatetags(cachetags, alltags)
        return alltags

    has_node = repo.changelog.index.has_node
    for head in reversed(heads):  # oldest to newest
        assert has_node(head), b"tag cache returned bogus head %s" % short(head)
    fnodes = _filterfnodes(tagfnode, reversed(heads))
    alltags = _tagsfromfnodes(ui, repo, fnodes)

    # and update the cache (if necessary)
    if shouldwrite:
        _writetagcache(ui, repo, valid, alltags)
    return alltags


def _filterfnodes(tagfnode, nodes):
    """return a list of unique fnodes

    The order of this list matches the order of "nodes". Preserving this order
    is important as reading tags in different order provides different
    results."""
    seen = set()  # set of fnode
    fnodes = []
    for no in nodes:  # oldest to newest
        fnode = tagfnode.get(no)
        if fnode and fnode not in seen:
            seen.add(fnode)
            fnodes.append(fnode)
    return fnodes


def _tagsfromfnodes(ui, repo, fnodes):
    """return a tagsmap from a list of file-nodes

    tagsmap: tag name to (node, hist) 2-tuples.

    The order of the list matters."""
    alltags = {}
    fctx = None
    for fnode in fnodes:
        if fctx is None:
            fctx = repo.filectx(b'.hgtags', fileid=fnode)
        else:
            fctx = fctx.filectx(fnode)
        filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
        _updatetags(filetags, alltags)
    return alltags


def readlocaltags(ui, repo, alltags, tagtypes):
    '''Read local tags in repo. Update alltags and tagtypes.'''
    try:
        data = repo.vfs.read(b"localtags")
    except FileNotFoundError:
        return

    # localtags is in the local encoding; re-encode to UTF-8 on
    # input for consistency with the rest of this module.
    filetags = _readtags(
        ui, repo, data.splitlines(), b"localtags", recode=encoding.fromlocal
    )

    # remove tags pointing to invalid nodes
    cl = repo.changelog
    for t in list(filetags):
        try:
            cl.rev(filetags[t][0])
        except (LookupError, ValueError):
            del filetags[t]

    _updatetags(filetags, alltags, b'local', tagtypes)


def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
    """Read tag definitions from a file (or any source of lines).

    This function returns two sortdicts with similar information:

    - the first dict, bintaghist, contains the tag information as expected by
      the _readtags function, i.e. a mapping from tag name to (node, hist):
        - node is the node id from the last line read for that name,
        - hist is the list of node ids previously associated with it (in file
          order). All node ids are binary, not hex.

    - the second dict, hextaglines, is a mapping from tag name to a list of
      [hexnode, line number] pairs, ordered from the oldest to the newest node.

    When calcnodelines is False the hextaglines dict is not calculated (an
    empty dict is returned). This is done to improve this function's
    performance in cases where the line numbers are not needed.
    """

    bintaghist = util.sortdict()
    hextaglines = util.sortdict()
    count = 0

    def dbg(msg):
        ui.debug(b"%s, line %d: %s\n" % (fn, count, msg))

    for nline, line in enumerate(lines):
        count += 1
        if not line:
            continue
        try:
            (nodehex, name) = line.split(b" ", 1)
        except ValueError:
            dbg(b"cannot parse entry")
            continue
        name = name.strip()
        if recode:
            name = recode(name)
        try:
            nodebin = bin(nodehex)
        except binascii.Error:
            dbg(b"node '%s' is not well formed" % nodehex)
            continue

        # update filetags
        if calcnodelines:
            # map tag name to a list of line numbers
            if name not in hextaglines:
                hextaglines[name] = []
            hextaglines[name].append([nodehex, nline])
            continue
        # map tag name to (node, hist)
        if name not in bintaghist:
            bintaghist[name] = []
        bintaghist[name].append(nodebin)
    return bintaghist, hextaglines


def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
    """Read tag definitions from a file (or any source of lines).

    Returns a mapping from tag name to (node, hist).

    "node" is the node id from the last line read for that name. "hist"
    is the list of node ids previously associated with it (in file order).
    All node ids are binary, not hex.
    """
    filetags, nodelines = _readtaghist(
        ui, repo, lines, fn, recode=recode, calcnodelines=calcnodelines
    )
    # util.sortdict().__setitem__ is much slower at replacing than inserting
    # new entries. The difference can matter if there are thousands of tags.
    # Create a new sortdict to avoid the performance penalty.
    newtags = util.sortdict()
    for tag, taghist in filetags.items():
        newtags[tag] = (taghist[-1], taghist[:-1])
    return newtags


def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
    """Incorporate the tag info read from one file into dictionaries

    The first one, 'alltags', is a "tagmaps" (see 'findglobaltags' for details).

    The second one, 'tagtypes', is optional and will be updated to track the
    "tagtype" of entries in the tagmaps. When set, the 'tagtype' argument also
    needs to be set."""
    if tagtype is None:
        assert tagtypes is None

    for name, nodehist in filetags.items():
        if name not in alltags:
            alltags[name] = nodehist
            if tagtype is not None:
                tagtypes[name] = tagtype
            continue

        # we prefer alltags[name] if:
        #  it supersedes us OR
        #  mutual supersedes and it has a higher rank
        # otherwise we win because we're tip-most
        anode, ahist = nodehist
        bnode, bhist = alltags[name]
        if (
            bnode != anode
            and anode in bhist
            and (bnode not in ahist or len(bhist) > len(ahist))
        ):
            anode = bnode
        elif tagtype is not None:
            tagtypes[name] = tagtype
        ahist.extend([n for n in bhist if n not in ahist])
        alltags[name] = anode, ahist

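# Sketch of the precedence rule above: if the incoming file records tag
# "t" -> n1 with history [n0] while alltags already holds "t" -> n2 with
# history [n0, n1], then n2 wins: n1 appears in the existing history (it
# was superseded) and n2 is not in the incoming one.

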
def _filename(repo):
    """name of a tagcache file for a given repo or repoview"""
    filename = b'tags2'
    if repo.filtername:
        filename = b'%s-%s' % (filename, repo.filtername)
    return filename


def _readtagcache(ui, repo):
    """Read the tag cache.

    Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).

    If the cache is completely up-to-date, "cachetags" is a dict of the
    form returned by _readtags() and "heads", "fnodes", and "validinfo" are
    None and "shouldwrite" is False.

    If the cache is not up to date, "cachetags" is None. "heads" is a list
    of all heads currently in the repository, ordered from tip to oldest.
    "validinfo" is a tuple describing cache validation info. This is used
    when writing the tags cache. "fnodes" is a mapping from head to .hgtags
    filenode. "shouldwrite" is True.

    If the cache is not up to date, the caller is responsible for reading tag
    info from each returned head. (See findglobaltags().)
    """
    try:
        cachefile = repo.cachevfs(_filename(repo), b'r')
        # force reading the file for static-http
        cachelines = iter(cachefile)
    except IOError:
        cachefile = None

    cacherev = None
    cachenode = None
    cachehash = None
    if cachefile:
        try:
            validline = next(cachelines)
            validline = validline.split()
            cacherev = int(validline[0])
            cachenode = bin(validline[1])
            if len(validline) > 2:
                cachehash = bin(validline[2])
        except Exception:
            # corruption of the cache, just recompute it.
            pass

    tipnode = repo.changelog.tip()
    tiprev = len(repo.changelog) - 1

    # Case 1 (common): tip is the same, so nothing has changed.
    # (Unchanged tip trivially means no changesets have been added.
    # But, thanks to localrepository.destroyed(), it also means none
    # have been destroyed by strip or rollback.)
    if (
        cacherev == tiprev
        and cachenode == tipnode
        and cachehash == scmutil.filteredhash(repo, tiprev)
    ):
        tags = _readtags(ui, repo, cachelines, cachefile.name)
        cachefile.close()
        return (None, None, None, tags, False)
    if cachefile:
        cachefile.close()  # ignore rest of file

    valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))

    repoheads = repo.heads()
    # Case 2 (uncommon): empty repo; get out quickly and don't bother
    # writing an empty cache.
    if repoheads == [repo.nullid]:
        return ([], {}, valid, {}, False)

    # Case 3 (uncommon): cache file missing or empty.

    # Case 4 (uncommon): tip rev decreased. This should only happen
    # when we're called from localrepository.destroyed(). Refresh the
    # cache so future invocations will not see disappeared heads in the
    # cache.

    # Case 5 (common): tip has changed, so we've added/replaced heads.

    # As it happens, the code to handle cases 3, 4, 5 is the same.

    # N.B. in case 4 (nodes destroyed), "new head" really means "newly
    # exposed".
    if not len(repo.file(b'.hgtags')):
        # No tags have ever been committed, so we can avoid a
        # potentially expensive search.
        return ([], {}, valid, None, True)

    # Now we have to lookup the .hgtags filenode for every new head.
    # This is the most expensive part of finding tags, so performance
473 # depends primarily on the size of newheads. Worst case: no cache
472 # depends primarily on the size of newheads. Worst case: no cache
474 # file, so newheads == repoheads.
473 # file, so newheads == repoheads.
475 # Reversed order helps the cache ('repoheads' is in descending order)
474 # Reversed order helps the cache ('repoheads' is in descending order)
476 cachefnode = _getfnodes(ui, repo, reversed(repoheads))
475 cachefnode = _getfnodes(ui, repo, reversed(repoheads))
477
476
478 # Caller has to iterate over all heads, but can use the filenodes in
477 # Caller has to iterate over all heads, but can use the filenodes in
479 # cachefnode to get to each .hgtags revision quickly.
478 # cachefnode to get to each .hgtags revision quickly.
480 return (repoheads, cachefnode, valid, None, True)
479 return (repoheads, cachefnode, valid, None, True)
481
480
482
481
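For orientation, here is a hedged sketch of how a caller such as findglobaltags() consumes the tuple documented above; the .hgtags reading and merge step is elided, and everything except _readtagcache/_writetagcache is illustrative:

    def _find_global_tags_sketch(ui, repo):
        heads, fnodes, valid, cachetags, shouldwrite = _readtagcache(ui, repo)
        if cachetags is not None:
            return cachetags  # cache was fully up to date
        tags = {}
        for head in reversed(heads):  # oldest head first helps the cache
            fnode = fnodes.get(head)
            if fnode is not None:
                pass  # read .hgtags at fnode and merge its entries into tags
        if shouldwrite:
            _writetagcache(ui, repo, valid, tags)
        return tags
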
483 def _getfnodes(ui, repo, nodes):
482 def _getfnodes(ui, repo, nodes):
484 """return .hgtags fnodes for a list of changeset nodes
483 """return .hgtags fnodes for a list of changeset nodes
485
484
486 Return value is a {node: fnode} mapping. There will be no entry for nodes
485 Return value is a {node: fnode} mapping. There will be no entry for nodes
487 without a '.hgtags' file.
486 without a '.hgtags' file.
488 """
487 """
489 starttime = util.timer()
488 starttime = util.timer()
490 fnodescache = hgtagsfnodescache(repo.unfiltered())
489 fnodescache = hgtagsfnodescache(repo.unfiltered())
491 cachefnode = {}
490 cachefnode = {}
492 validated_fnodes = set()
491 validated_fnodes = set()
493 unknown_entries = set()
492 unknown_entries = set()
494
493
495 flog = None
494 flog = None
496 for node in nodes:
495 for node in nodes:
497 fnode = fnodescache.getfnode(node)
496 fnode = fnodescache.getfnode(node)
498 if fnode != repo.nullid:
497 if fnode != repo.nullid:
499 if fnode not in validated_fnodes:
498 if fnode not in validated_fnodes:
500 if flog is None:
499 if flog is None:
501 flog = repo.file(b'.hgtags')
500 flog = repo.file(b'.hgtags')
502 if flog.hasnode(fnode):
501 if flog.hasnode(fnode):
503 validated_fnodes.add(fnode)
502 validated_fnodes.add(fnode)
504 else:
503 else:
505 unknown_entries.add(node)
504 unknown_entries.add(node)
506 cachefnode[node] = fnode
505 cachefnode[node] = fnode
507
506
508 if unknown_entries:
507 if unknown_entries:
509 fixed_nodemap = fnodescache.refresh_invalid_nodes(unknown_entries)
508 fixed_nodemap = fnodescache.refresh_invalid_nodes(unknown_entries)
510 for node, fnode in fixed_nodemap.items():
509 for node, fnode in fixed_nodemap.items():
511 if fnode != repo.nullid:
510 if fnode != repo.nullid:
512 cachefnode[node] = fnode
511 cachefnode[node] = fnode
513
512
514 fnodescache.write()
513 fnodescache.write()
515
514
516 duration = util.timer() - starttime
515 duration = util.timer() - starttime
517 ui.log(
516 ui.log(
518 b'tagscache',
517 b'tagscache',
519 b'%d/%d cache hits/lookups in %0.4f seconds\n',
518 b'%d/%d cache hits/lookups in %0.4f seconds\n',
520 fnodescache.hitcount,
519 fnodescache.hitcount,
521 fnodescache.lookupcount,
520 fnodescache.lookupcount,
522 duration,
521 duration,
523 )
522 )
524 return cachefnode
523 return cachefnode
525
524
526
525
527 def _writetagcache(ui, repo, valid, cachetags):
526 def _writetagcache(ui, repo, valid, cachetags):
528 filename = _filename(repo)
527 filename = _filename(repo)
529 try:
528 try:
530 cachefile = repo.cachevfs(filename, b'w', atomictemp=True)
529 cachefile = repo.cachevfs(filename, b'w', atomictemp=True)
531 except (OSError, IOError):
530 except (OSError, IOError):
532 return
531 return
533
532
534 ui.log(
533 ui.log(
535 b'tagscache',
534 b'tagscache',
536 b'writing .hg/cache/%s with %d tags\n',
535 b'writing .hg/cache/%s with %d tags\n',
537 filename,
536 filename,
538 len(cachetags),
537 len(cachetags),
539 )
538 )
540
539
541 if valid[2]:
540 if valid[2]:
542 cachefile.write(
541 cachefile.write(
543 b'%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2]))
542 b'%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2]))
544 )
543 )
545 else:
544 else:
546 cachefile.write(b'%d %s\n' % (valid[0], hex(valid[1])))
545 cachefile.write(b'%d %s\n' % (valid[0], hex(valid[1])))
547
546
548 # Tag names in the cache are in UTF-8 -- which is the whole reason
547 # Tag names in the cache are in UTF-8 -- which is the whole reason
549 # we keep them in UTF-8 throughout this module. If we converted
548 # we keep them in UTF-8 throughout this module. If we converted
550 # them to local encoding on input, we would lose info writing them to
549 # them to local encoding on input, we would lose info writing them to
551 # the cache.
550 # the cache.
552 for (name, (node, hist)) in sorted(cachetags.items()):
551 for (name, (node, hist)) in sorted(cachetags.items()):
553 for n in hist:
552 for n in hist:
554 cachefile.write(b"%s %s\n" % (hex(n), name))
553 cachefile.write(b"%s %s\n" % (hex(n), name))
555 cachefile.write(b"%s %s\n" % (hex(node), name))
554 cachefile.write(b"%s %s\n" % (hex(node), name))
556
555
557 try:
556 try:
558 cachefile.close()
557 cachefile.close()
559 except (OSError, IOError):
558 except (OSError, IOError):
560 pass
559 pass
561
560
562
561
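Putting the write logic above together with _readtagcache, a tags2 cache file consists of one validity line followed by one line per tag-history entry, with the current node for each tag written last. An illustrative example with invented, shortened hashes (the real file stores full 40-character hex nodes):

    4242 9fceb1a3... 5d41402a...    first line: tiprev tipnode filteredhash
    a94a8fe5... v1.0                older history entry for v1.0
    de9f2c7f... v1.0                current node for v1.0
    77de68da... v1.1

The third field on the first line is written only when the filtered hash is non-empty, matching the two write branches above.
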
563 def tag(repo, names, node, message, local, user, date, editor=False):
562 def tag(repo, names, node, message, local, user, date, editor=False):
564 """tag a revision with one or more symbolic names.
563 """tag a revision with one or more symbolic names.
565
564
566 names is a list of strings or, when adding a single tag, names may be a
565 names is a list of strings or, when adding a single tag, names may be a
567 string.
566 string.
568
567
569 if local is True, the tags are stored in a per-repository file.
568 if local is True, the tags are stored in a per-repository file.
570 otherwise, they are stored in the .hgtags file, and a new
569 otherwise, they are stored in the .hgtags file, and a new
571 changeset is committed with the change.
570 changeset is committed with the change.
572
571
573 keyword arguments:
572 keyword arguments:
574
573
575 local: whether to store tags in non-version-controlled file
574 local: whether to store tags in non-version-controlled file
576 (default False)
575 (default False)
577
576
578 message: commit message to use if committing
577 message: commit message to use if committing
579
578
580 user: name of user to use if committing
579 user: name of user to use if committing
581
580
582 date: date tuple to use if committing"""
581 date: date tuple to use if committing"""
583
582
584 if not local:
583 if not local:
585 m = matchmod.exact([b'.hgtags'])
584 m = matchmod.exact([b'.hgtags'])
586 st = repo.status(match=m, unknown=True, ignored=True)
585 st = repo.status(match=m, unknown=True, ignored=True)
587 if any(
586 if any(
588 (
587 (
589 st.modified,
588 st.modified,
590 st.added,
589 st.added,
591 st.removed,
590 st.removed,
592 st.deleted,
591 st.deleted,
593 st.unknown,
592 st.unknown,
594 st.ignored,
593 st.ignored,
595 )
594 )
596 ):
595 ):
597 raise error.Abort(
596 raise error.Abort(
598 _(b'working copy of .hgtags is changed'),
597 _(b'working copy of .hgtags is changed'),
599 hint=_(b'please commit .hgtags manually'),
598 hint=_(b'please commit .hgtags manually'),
600 )
599 )
601
600
602 with repo.wlock():
601 with repo.wlock():
603 repo.tags() # instantiate the cache
602 repo.tags() # instantiate the cache
604 _tag(repo, names, node, message, local, user, date, editor=editor)
603 _tag(repo, names, node, message, local, user, date, editor=editor)
605
604
606
605
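A hedged usage sketch of the wrapper above; the repository, user, and tag names are invented for illustration:

    # local tag: recorded in .hg/localtags, no changeset is committed
    tag(repo, b'wip', repo[b'.'].node(), b'', True, b'alice', None)

    # global tag: .hgtags is updated and a changeset is committed
    tag(repo, [b'v1.0'], repo[b'tip'].node(),
        b'Added tag v1.0', False, b'alice', None)
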
607 def _tag(
606 def _tag(
608 repo, names, node, message, local, user, date, extra=None, editor=False
607 repo, names, node, message, local, user, date, extra=None, editor=False
609 ):
608 ):
610 if isinstance(names, bytes):
609 if isinstance(names, bytes):
611 names = (names,)
610 names = (names,)
612
611
613 branches = repo.branchmap()
612 branches = repo.branchmap()
614 for name in names:
613 for name in names:
615 repo.hook(b'pretag', throw=True, node=hex(node), tag=name, local=local)
614 repo.hook(b'pretag', throw=True, node=hex(node), tag=name, local=local)
616 if name in branches:
615 if name in branches:
617 repo.ui.warn(
616 repo.ui.warn(
618 _(b"warning: tag %s conflicts with existing branch name\n")
617 _(b"warning: tag %s conflicts with existing branch name\n")
619 % name
618 % name
620 )
619 )
621
620
622 def writetags(fp, names, munge, prevtags):
621 def writetags(fp, names, munge, prevtags):
623 fp.seek(0, io.SEEK_END)
622 fp.seek(0, io.SEEK_END)
624 if prevtags and not prevtags.endswith(b'\n'):
623 if prevtags and not prevtags.endswith(b'\n'):
625 fp.write(b'\n')
624 fp.write(b'\n')
626 for name in names:
625 for name in names:
627 if munge:
626 if munge:
628 m = munge(name)
627 m = munge(name)
629 else:
628 else:
630 m = name
629 m = name
631
630
632 if repo._tagscache.tagtypes and name in repo._tagscache.tagtypes:
631 if repo._tagscache.tagtypes and name in repo._tagscache.tagtypes:
633 old = repo.tags().get(name, repo.nullid)
632 old = repo.tags().get(name, repo.nullid)
634 fp.write(b'%s %s\n' % (hex(old), m))
633 fp.write(b'%s %s\n' % (hex(old), m))
635 fp.write(b'%s %s\n' % (hex(node), m))
634 fp.write(b'%s %s\n' % (hex(node), m))
636 fp.close()
635 fp.close()
637
636
638 prevtags = b''
637 prevtags = b''
639 if local:
638 if local:
640 try:
639 try:
641 fp = repo.vfs(b'localtags', b'r+')
640 fp = repo.vfs(b'localtags', b'r+')
642 except IOError:
641 except IOError:
643 fp = repo.vfs(b'localtags', b'a')
642 fp = repo.vfs(b'localtags', b'a')
644 else:
643 else:
645 prevtags = fp.read()
644 prevtags = fp.read()
646
645
647 # local tags are stored in the current charset
646 # local tags are stored in the current charset
648 writetags(fp, names, None, prevtags)
647 writetags(fp, names, None, prevtags)
649 for name in names:
648 for name in names:
650 repo.hook(b'tag', node=hex(node), tag=name, local=local)
649 repo.hook(b'tag', node=hex(node), tag=name, local=local)
651 return
650 return
652
651
653 try:
652 try:
654 fp = repo.wvfs(b'.hgtags', b'rb+')
653 fp = repo.wvfs(b'.hgtags', b'rb+')
655 except FileNotFoundError:
654 except FileNotFoundError:
656 fp = repo.wvfs(b'.hgtags', b'ab')
655 fp = repo.wvfs(b'.hgtags', b'ab')
657 else:
656 else:
658 prevtags = fp.read()
657 prevtags = fp.read()
659
658
660 # committed tags are stored in UTF-8
659 # committed tags are stored in UTF-8
661 writetags(fp, names, encoding.fromlocal, prevtags)
660 writetags(fp, names, encoding.fromlocal, prevtags)
662
661
663 fp.close()
662 fp.close()
664
663
665 repo.invalidatecaches()
664 repo.invalidatecaches()
666
665
667 with repo.dirstate.changing_files(repo):
666 with repo.dirstate.changing_files(repo):
668 if b'.hgtags' not in repo.dirstate:
667 if b'.hgtags' not in repo.dirstate:
669 repo[None].add([b'.hgtags'])
668 repo[None].add([b'.hgtags'])
670
669
671 m = matchmod.exact([b'.hgtags'])
670 m = matchmod.exact([b'.hgtags'])
672 tagnode = repo.commit(
671 tagnode = repo.commit(
673 message, user, date, extra=extra, match=m, editor=editor
672 message, user, date, extra=extra, match=m, editor=editor
674 )
673 )
675
674
676 for name in names:
675 for name in names:
677 repo.hook(b'tag', node=hex(node), tag=name, local=local)
676 repo.hook(b'tag', node=hex(node), tag=name, local=local)
678
677
679 return tagnode
678 return tagnode
680
679
681
680
682 _fnodescachefile = b'hgtagsfnodes1'
681 _fnodescachefile = b'hgtagsfnodes1'
683 _fnodesrecsize = 4 + 20 # changeset fragment + filenode
682 _fnodesrecsize = 4 + 20 # changeset fragment + filenode
684 _fnodesmissingrec = b'\xff' * 24
683 _fnodesmissingrec = b'\xff' * 24
685
684
686
685
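The record layout implied by these constants is easy to show as a standalone sketch (illustrative helpers, not Mercurial code):

    _RECSIZE = 4 + 20  # mirrors _fnodesrecsize

    def pack_record(node, fnode):
        # node and fnode are 20-byte binary hashes; only the first 4 bytes
        # of the changeset node are kept, as a cheap validity check
        return node[:4] + fnode

    def unpack_record(raw, rev):
        offset = rev * _RECSIZE
        rec = bytes(raw[offset:offset + _RECSIZE])
        return rec[:4], rec[4:]  # (changeset fragment, .hgtags filenode)
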
687 class hgtagsfnodescache:
686 class hgtagsfnodescache:
688 """Persistent cache mapping revisions to .hgtags filenodes.
687 """Persistent cache mapping revisions to .hgtags filenodes.
689
688
690 The cache is an array of records. Each item in the array corresponds to
689 The cache is an array of records. Each item in the array corresponds to
691 a changelog revision. Values in the array contain the first 4 bytes of
690 a changelog revision. Values in the array contain the first 4 bytes of
692 the node hash and the 20-byte .hgtags filenode for that revision.
691 the node hash and the 20-byte .hgtags filenode for that revision.
693
692
694 The first 4 bytes are present as a form of verification. Repository
693 The first 4 bytes are present as a form of verification. Repository
695 stripping and rewriting may change the node at a numeric revision in the
694 stripping and rewriting may change the node at a numeric revision in the
696 changelog. The changeset fragment serves as a verifier to detect
695 changelog. The changeset fragment serves as a verifier to detect
697 rewriting. This logic is shared with the rev branch cache (see
696 rewriting. This logic is shared with the rev branch cache (see
698 branchmap.py).
697 branchmap.py).
699
698
700 The instance holds in memory the full cache content but entries are
699 The instance holds in memory the full cache content but entries are
701 only parsed on read.
700 only parsed on read.
702
701
703 Instances behave like lists. ``c[i]`` works where i is a rev or
702 Instances behave like lists. ``c[i]`` works where i is a rev or
704 changeset node. Missing indexes are populated automatically on access.
703 changeset node. Missing indexes are populated automatically on access.
705 """
704 """
706
705
707 def __init__(self, repo):
706 def __init__(self, repo):
708 assert repo.filtername is None
707 assert repo.filtername is None
709
708
710 self._repo = repo
709 self._repo = repo
711
710
712 # Only for reporting purposes.
711 # Only for reporting purposes.
713 self.lookupcount = 0
712 self.lookupcount = 0
714 self.hitcount = 0
713 self.hitcount = 0
715
714
716 try:
715 try:
717 data = repo.cachevfs.read(_fnodescachefile)
716 data = repo.cachevfs.read(_fnodescachefile)
718 except (OSError, IOError):
717 except (OSError, IOError):
719 data = b""
718 data = b""
720 self._raw = bytearray(data)
719 self._raw = bytearray(data)
721
720
722 # The end state of self._raw is an array that is of the exact length
721 # The end state of self._raw is an array that is of the exact length
723 # required to hold a record for every revision in the repository.
722 # required to hold a record for every revision in the repository.
724 # We truncate or extend the array as necessary. self._dirtyoffset is
723 # We truncate or extend the array as necessary. self._dirtyoffset is
725 # defined to be the start offset at which we need to write the output
724 # defined to be the start offset at which we need to write the output
726 # file. This offset is also adjusted when new entries are calculated
725 # file. This offset is also adjusted when new entries are calculated
727 # for array members.
726 # for array members.
728 cllen = len(repo.changelog)
727 cllen = len(repo.changelog)
729 wantedlen = cllen * _fnodesrecsize
728 wantedlen = cllen * _fnodesrecsize
730 rawlen = len(self._raw)
729 rawlen = len(self._raw)
731
730
732 self._dirtyoffset = None
731 self._dirtyoffset = None
733
732
734 rawlentokeep = min(
733 rawlentokeep = min(
735 wantedlen, (rawlen // _fnodesrecsize) * _fnodesrecsize
734 wantedlen, (rawlen // _fnodesrecsize) * _fnodesrecsize
736 )
735 )
737 if rawlen > rawlentokeep:
736 if rawlen > rawlentokeep:
738 # There's no easy way to truncate array instances. This seems
737 # There's no easy way to truncate array instances. This seems
739 # slightly less evil than copying a potentially large array slice.
738 # slightly less evil than copying a potentially large array slice.
740 for i in range(rawlen - rawlentokeep):
739 for i in range(rawlen - rawlentokeep):
741 self._raw.pop()
740 self._raw.pop()
742 rawlen = len(self._raw)
741 rawlen = len(self._raw)
743 self._dirtyoffset = rawlen
742 self._dirtyoffset = rawlen
744 if rawlen < wantedlen:
743 if rawlen < wantedlen:
745 if self._dirtyoffset is None:
744 if self._dirtyoffset is None:
746 self._dirtyoffset = rawlen
745 self._dirtyoffset = rawlen
747 # TODO: zero fill entire record, because it's invalid not missing?
746 # TODO: zero fill entire record, because it's invalid not missing?
748 self._raw.extend(b'\xff' * (wantedlen - rawlen))
747 self._raw.extend(b'\xff' * (wantedlen - rawlen))
749
748
750 def getfnode(self, node, computemissing=True):
749 def getfnode(self, node, computemissing=True):
751 """Obtain the filenode of the .hgtags file at a specified revision.
750 """Obtain the filenode of the .hgtags file at a specified revision.
752
751
753 If the value is in the cache, the entry will be validated and returned.
752 If the value is in the cache, the entry will be validated and returned.
754 Otherwise, the filenode will be computed and returned unless
753 Otherwise, the filenode will be computed and returned unless
755 "computemissing" is False. In that case, None will be returned if
754 "computemissing" is False. In that case, None will be returned if
756 the entry is missing or False if the entry is invalid without
755 the entry is missing or False if the entry is invalid without
757 any potentially expensive computation being performed.
756 any potentially expensive computation being performed.
758
757
759 If a .hgtags file does not exist at the specified revision, nullid is
758 If a .hgtags file does not exist at the specified revision, nullid is
760 returned.
759 returned.
761 """
760 """
762 if node == self._repo.nullid:
761 if node == self._repo.nullid:
763 return node
762 return node
764
763
765 rev = self._repo.changelog.rev(node)
764 rev = self._repo.changelog.rev(node)
766
765
767 self.lookupcount += 1
766 self.lookupcount += 1
768
767
769 offset = rev * _fnodesrecsize
768 offset = rev * _fnodesrecsize
770 record = b'%s' % self._raw[offset : offset + _fnodesrecsize]
769 record = b'%s' % self._raw[offset : offset + _fnodesrecsize]
771 properprefix = node[0:4]
770 properprefix = node[0:4]
772
771
773 # Validate and return existing entry.
772 # Validate and return existing entry.
774 if record != _fnodesmissingrec and len(record) == _fnodesrecsize:
773 if record != _fnodesmissingrec and len(record) == _fnodesrecsize:
775 fileprefix = record[0:4]
774 fileprefix = record[0:4]
776
775
777 if fileprefix == properprefix:
776 if fileprefix == properprefix:
778 self.hitcount += 1
777 self.hitcount += 1
779 return record[4:]
778 return record[4:]
780
779
781 # Fall through.
780 # Fall through.
782
781
783 # If we get here, the entry is either missing or invalid.
782 # If we get here, the entry is either missing or invalid.
784
783
785 if not computemissing:
784 if not computemissing:
786 if record != _fnodesmissingrec:
785 if record != _fnodesmissingrec:
787 return False
786 return False
788 return None
787 return None
789
788
790 fnode = self._computefnode(node)
789 fnode = self._computefnode(node)
791 self._writeentry(offset, properprefix, fnode)
790 self._writeentry(offset, properprefix, fnode)
792 return fnode
791 return fnode
793
792
794 def _computefnode(self, node):
793 def _computefnode(self, node):
795 """Finds the tag filenode for a node which is missing or invalid
794 """Finds the tag filenode for a node which is missing or invalid
796 in cache"""
795 in cache"""
797 ctx = self._repo[node]
796 ctx = self._repo[node]
798 rev = ctx.rev()
797 rev = ctx.rev()
799 fnode = None
798 fnode = None
800 cl = self._repo.changelog
799 cl = self._repo.changelog
801 p1rev, p2rev = cl._uncheckedparentrevs(rev)
800 p1rev, p2rev = cl._uncheckedparentrevs(rev)
802 p1node = cl.node(p1rev)
801 p1node = cl.node(p1rev)
803 p1fnode = self.getfnode(p1node, computemissing=False)
802 p1fnode = self.getfnode(p1node, computemissing=False)
804 if p2rev != nullrev:
803 if p2rev != nullrev:
805 # There are some non-merge changesets where p1 is null and p2 is set.
804 # There are some non-merge changesets where p1 is null and p2 is set.
806 # Processing them as merges is just slower, but still gives a good
805 # Processing them as merges is just slower, but still gives a good
807 # result.
806 # result.
808 p2node = cl.node(p2rev)
807 p2node = cl.node(p2rev)
809 p2fnode = self.getfnode(p2node, computemissing=False)
808 p2fnode = self.getfnode(p2node, computemissing=False)
810 if p1fnode != p2fnode:
809 if p1fnode != p2fnode:
811 # we cannot rely on readfast because we don't know against what
810 # we cannot rely on readfast because we don't know against what
812 # parent the readfast delta is computed
811 # parent the readfast delta is computed
813 p1fnode = None
812 p1fnode = None
814 if p1fnode:
813 if p1fnode:
815 mctx = ctx.manifestctx()
814 mctx = ctx.manifestctx()
816 fnode = mctx.readfast().get(b'.hgtags')
815 fnode = mctx.readfast().get(b'.hgtags')
817 if fnode is None:
816 if fnode is None:
818 fnode = p1fnode
817 fnode = p1fnode
819 if fnode is None:
818 if fnode is None:
820 # Populate missing entry.
819 # Populate missing entry.
821 try:
820 try:
822 fnode = ctx.filenode(b'.hgtags')
821 fnode = ctx.filenode(b'.hgtags')
823 except error.LookupError:
822 except error.LookupError:
824 # No .hgtags file on this revision.
823 # No .hgtags file on this revision.
825 fnode = self._repo.nullid
824 fnode = self._repo.nullid
826 return fnode
825 return fnode
827
826
828 def setfnode(self, node, fnode):
827 def setfnode(self, node, fnode):
829 """Set the .hgtags filenode for a given changeset."""
828 """Set the .hgtags filenode for a given changeset."""
830 assert len(fnode) == 20
829 assert len(fnode) == 20
831 ctx = self._repo[node]
830 ctx = self._repo[node]
832
831
833 # Do a lookup first to avoid writing if nothing has changed.
832 # Do a lookup first to avoid writing if nothing has changed.
834 if self.getfnode(ctx.node(), computemissing=False) == fnode:
833 if self.getfnode(ctx.node(), computemissing=False) == fnode:
835 return
834 return
836
835
837 self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)
836 self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)
838
837
839 def refresh_invalid_nodes(self, nodes):
838 def refresh_invalid_nodes(self, nodes):
840 """recomputes file nodes for a given set of nodes which has unknown
839 """recomputes file nodes for a given set of nodes which has unknown
841 filenodes for them in the cache
840 filenodes for them in the cache
842 Also updates the in-memory cache with the correct filenode.
841 Also updates the in-memory cache with the correct filenode.
843 Caller needs to take care about calling `.write()` so that updates are
842 Caller needs to take care about calling `.write()` so that updates are
844 persisted.
843 persisted.
845 Returns a map {node: recomputed fnode}
844 Returns a map {node: recomputed fnode}
846 """
845 """
847 fixed_nodemap = {}
846 fixed_nodemap = {}
848 for node in nodes:
847 for node in nodes:
849 fnode = self._computefnode(node)
848 fnode = self._computefnode(node)
850 fixed_nodemap[node] = fnode
849 fixed_nodemap[node] = fnode
851 self.setfnode(node, fnode)
850 self.setfnode(node, fnode)
852 return fixed_nodemap
851 return fixed_nodemap
853
852
854 def _writeentry(self, offset, prefix, fnode):
853 def _writeentry(self, offset, prefix, fnode):
855 # Slices on array instances only accept other arrays.
854 # Slices on array instances only accept other arrays.
856 entry = bytearray(prefix + fnode)
855 entry = bytearray(prefix + fnode)
857 self._raw[offset : offset + _fnodesrecsize] = entry
856 self._raw[offset : offset + _fnodesrecsize] = entry
858 # self._dirtyoffset could be None.
857 # self._dirtyoffset could be None.
859 self._dirtyoffset = min(self._dirtyoffset or 0, offset or 0)
858 self._dirtyoffset = min(self._dirtyoffset or 0, offset or 0)
860
859
861 def write(self):
860 def write(self):
862 """Perform all necessary writes to cache file.
861 """Perform all necessary writes to cache file.
863
862
864 This may no-op if no writes are needed or if a write lock could
863 This may no-op if no writes are needed or if a write lock could
865 not be obtained.
864 not be obtained.
866 """
865 """
867 if self._dirtyoffset is None:
866 if self._dirtyoffset is None:
868 return
867 return
869
868
870 data = self._raw[self._dirtyoffset :]
869 data = self._raw[self._dirtyoffset :]
871 if not data:
870 if not data:
872 return
871 return
873
872
874 repo = self._repo
873 repo = self._repo
875
874
876 try:
875 try:
877 lock = repo.lock(wait=False)
876 lock = repo.lock(wait=False)
878 except error.LockError:
877 except error.LockError:
879 repo.ui.log(
878 repo.ui.log(
880 b'tagscache',
879 b'tagscache',
881 b'not writing .hg/cache/%s because '
880 b'not writing .hg/cache/%s because '
882 b'lock cannot be acquired\n' % _fnodescachefile,
881 b'lock cannot be acquired\n' % _fnodescachefile,
883 )
882 )
884 return
883 return
885
884
886 try:
885 try:
887 f = repo.cachevfs.open(_fnodescachefile, b'ab')
886 f = repo.cachevfs.open(_fnodescachefile, b'ab')
888 try:
887 try:
889 # if the file has been truncated
888 # if the file has been truncated
890 actualoffset = f.tell()
889 actualoffset = f.tell()
891 if actualoffset < self._dirtyoffset:
890 if actualoffset < self._dirtyoffset:
892 self._dirtyoffset = actualoffset
891 self._dirtyoffset = actualoffset
893 data = self._raw[self._dirtyoffset :]
892 data = self._raw[self._dirtyoffset :]
894 f.seek(self._dirtyoffset)
893 f.seek(self._dirtyoffset)
895 f.truncate()
894 f.truncate()
896 repo.ui.log(
895 repo.ui.log(
897 b'tagscache',
896 b'tagscache',
898 b'writing %d bytes to cache/%s\n'
897 b'writing %d bytes to cache/%s\n'
899 % (len(data), _fnodescachefile),
898 % (len(data), _fnodescachefile),
900 )
899 )
901 f.write(data)
900 f.write(data)
902 self._dirtyoffset = None
901 self._dirtyoffset = None
903 finally:
902 finally:
904 f.close()
903 f.close()
905 except (IOError, OSError) as inst:
904 except (IOError, OSError) as inst:
906 repo.ui.log(
905 repo.ui.log(
907 b'tagscache',
906 b'tagscache',
908 b"couldn't write cache/%s: %s\n"
907 b"couldn't write cache/%s: %s\n"
909 % (_fnodescachefile, stringutil.forcebytestr(inst)),
908 % (_fnodescachefile, stringutil.forcebytestr(inst)),
910 )
909 )
911 finally:
910 finally:
912 lock.release()
911 lock.release()
912
913
914 def clear_cache_on_disk(repo):
915 """function used by the perf extension to "tags" cache"""
916 repo.cachevfs.tryunlink(_filename(repo))
917
918
919 def clear_cache_fnodes(repo):
920 """function used by the perf extension to clear "file node cache"""
921 repo.cachevfs.tryunlink(_filename(repo))
922
923
924 def forget_fnodes(repo, revs):
925 """function used by the perf extension to prune some entries from the fnodes
926 cache"""
927 missing_1 = b'\xff' * 4
928 missing_2 = b'\xff' * 20
929 cache = hgtagsfnodescache(repo.unfiltered())
930 for r in revs:
931 cache._writeentry(r * _fnodesrecsize, missing_1, missing_2)
932 cache.write()
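A sketch of how the perf extension might drive these helpers (the rev range is arbitrary):

    clear_cache_on_disk(repo)        # remove the tags2* cache file
    clear_cache_fnodes(repo)         # remove the hgtagsfnodes1 cache file
    forget_fnodes(repo, range(100))  # re-mark revs 0..99 as missing

forget_fnodes overwrites each record with the all-0xff missing marker, so a later getfnode() call will recompute those entries.
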
@@ -1,1122 +1,1121 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 from ..i18n import _
9 from ..i18n import _
10 from .. import (
10 from .. import (
11 error,
11 error,
12 localrepo,
12 localrepo,
13 pycompat,
13 pycompat,
14 requirements,
14 requirements,
15 revlog,
15 revlog,
16 util,
16 util,
17 )
17 )
18
18
19 from ..utils import compression
19 from ..utils import compression
20
20
21 if pycompat.TYPE_CHECKING:
21 if pycompat.TYPE_CHECKING:
22 from typing import (
22 from typing import (
23 List,
23 List,
24 Type,
24 Type,
25 )
25 )
26
26
27
27
28 # list of requirements that request a clone of all revlog if added/removed
28 # list of requirements that request a clone of all revlog if added/removed
29 RECLONES_REQUIREMENTS = {
29 RECLONES_REQUIREMENTS = {
30 requirements.GENERALDELTA_REQUIREMENT,
30 requirements.GENERALDELTA_REQUIREMENT,
31 requirements.SPARSEREVLOG_REQUIREMENT,
31 requirements.SPARSEREVLOG_REQUIREMENT,
32 requirements.REVLOGV2_REQUIREMENT,
32 requirements.REVLOGV2_REQUIREMENT,
33 requirements.CHANGELOGV2_REQUIREMENT,
33 requirements.CHANGELOGV2_REQUIREMENT,
34 }
34 }
35
35
36
36
37 def preservedrequirements(repo):
37 def preservedrequirements(repo):
38 preserved = {
38 preserved = {
39 requirements.SHARED_REQUIREMENT,
39 requirements.SHARED_REQUIREMENT,
40 requirements.NARROW_REQUIREMENT,
40 requirements.NARROW_REQUIREMENT,
41 }
41 }
42 return preserved & repo.requirements
42 return preserved & repo.requirements
43
43
44
44
45 FORMAT_VARIANT = b'deficiency'
45 FORMAT_VARIANT = b'deficiency'
46 OPTIMISATION = b'optimization'
46 OPTIMISATION = b'optimization'
47
47
48
48
49 class improvement:
49 class improvement:
50 """Represents an improvement that can be made as part of an upgrade."""
50 """Represents an improvement that can be made as part of an upgrade."""
51
51
52 ### The following attributes should be defined for each subclass:
52 ### The following attributes should be defined for each subclass:
53
53
54 # Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
54 # Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
55 # A format variant is where we change the storage format. Not all format
55 # A format variant is where we change the storage format. Not all format
56 # variant changes are an obvious problem.
56 # variant changes are an obvious problem.
57 # An optimization is an action (sometimes optional) that
57 # An optimization is an action (sometimes optional) that
58 # can be taken to further improve the state of the repository.
58 # can be taken to further improve the state of the repository.
59 type = None
59 type = None
60
60
61 # machine-readable string uniquely identifying this improvement. it will be
61 # machine-readable string uniquely identifying this improvement. it will be
62 # mapped to an action later in the upgrade process.
62 # mapped to an action later in the upgrade process.
63 name = None
63 name = None
64
64
65 # message intended for humans explaining the improvement in more detail,
65 # message intended for humans explaining the improvement in more detail,
66 # including the implications of it. ``FORMAT_VARIANT`` types should be
66 # including the implications of it. ``FORMAT_VARIANT`` types should be
67 # worded in the present tense.
67 # worded in the present tense.
69 description = None
69 description = None
70
70
71 # message intended for humans explaining what an upgrade addressing this
71 # message intended for humans explaining what an upgrade addressing this
72 # issue will do. should be worded in the future tense.
72 # issue will do. should be worded in the future tense.
73 upgrademessage = None
73 upgrademessage = None
74
74
75 # value of current Mercurial default for new repository
75 # value of current Mercurial default for new repository
76 default = None
76 default = None
77
77
78 # Message intended for humans which will be shown post an upgrade
78 # Message intended for humans which will be shown post an upgrade
79 # operation in which this improvement was added
79 # operation in which this improvement was added
80 postupgrademessage = None
80 postupgrademessage = None
81
81
82 # Message intended for humans which will be shown post an upgrade
82 # Message intended for humans which will be shown post an upgrade
83 # operation in which this improvement was removed
83 # operation in which this improvement was removed
84 postdowngrademessage = None
84 postdowngrademessage = None
85
85
86 # By default we assume that every improvement touches requirements and all revlogs
86 # By default we assume that every improvement touches requirements and all revlogs
87
87
88 # Whether this improvement touches filelogs
88 # Whether this improvement touches filelogs
89 touches_filelogs = True
89 touches_filelogs = True
90
90
91 # Whether this improvement touches manifests
91 # Whether this improvement touches manifests
92 touches_manifests = True
92 touches_manifests = True
93
93
94 # Whether this improvement touches changelog
94 # Whether this improvement touches changelog
95 touches_changelog = True
95 touches_changelog = True
96
96
97 # Whether this improvement changes repository requirements
97 # Whether this improvement changes repository requirements
98 touches_requirements = True
98 touches_requirements = True
99
99
100 # Whether this improvement touches the dirstate
100 # Whether this improvement touches the dirstate
101 touches_dirstate = False
101 touches_dirstate = False
102
102
103 # Can this action be run on a share instead of its main repository
103 # Can this action be run on a share instead of its main repository
104 compatible_with_share = False
104 compatible_with_share = False
105
105
106
106
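To make the attribute contract concrete, here is a minimal hypothetical subclass; this exact variant does not exist in Mercurial and is shown only as an illustration:

    class examplevariant(improvement):
        type = OPTIMISATION
        name = b'example-optimisation'
        description = _(b'data could be stored more compactly')
        upgrademessage = _(b'data will be stored more compactly')

The registered, real variants follow.
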
107 allformatvariant = [] # type: List[Type['formatvariant']]
107 allformatvariant = [] # type: List[Type['formatvariant']]
108
108
109
109
110 def registerformatvariant(cls):
110 def registerformatvariant(cls):
111 allformatvariant.append(cls)
111 allformatvariant.append(cls)
112 return cls
112 return cls
113
113
114
114
115 class formatvariant(improvement):
115 class formatvariant(improvement):
116 """an improvement subclass dedicated to repository format"""
116 """an improvement subclass dedicated to repository format"""
117
117
118 type = FORMAT_VARIANT
118 type = FORMAT_VARIANT
119
119
120 @staticmethod
120 @staticmethod
121 def fromrepo(repo):
121 def fromrepo(repo):
122 """current value of the variant in the repository"""
122 """current value of the variant in the repository"""
123 raise NotImplementedError()
123 raise NotImplementedError()
124
124
125 @staticmethod
125 @staticmethod
126 def fromconfig(repo):
126 def fromconfig(repo):
127 """current value of the variant in the configuration"""
127 """current value of the variant in the configuration"""
128 raise NotImplementedError()
128 raise NotImplementedError()
129
129
130
130
131 class requirementformatvariant(formatvariant):
131 class requirementformatvariant(formatvariant):
132 """formatvariant based on a 'requirement' name.
132 """formatvariant based on a 'requirement' name.
133
133
134 Many format variants are controlled by a 'requirement'. We define a small
134 Many format variants are controlled by a 'requirement'. We define a small
135 subclass to factor out the code.
135 subclass to factor out the code.
136 """
136 """
137
137
138 # the requirement that controls this format variant
138 # the requirement that controls this format variant
139 _requirement = None
139 _requirement = None
140
140
141 @staticmethod
141 @staticmethod
142 def _newreporequirements(ui):
142 def _newreporequirements(ui):
143 return localrepo.newreporequirements(
143 return localrepo.newreporequirements(
144 ui, localrepo.defaultcreateopts(ui)
144 ui, localrepo.defaultcreateopts(ui)
145 )
145 )
146
146
147 @classmethod
147 @classmethod
148 def fromrepo(cls, repo):
148 def fromrepo(cls, repo):
149 assert cls._requirement is not None
149 assert cls._requirement is not None
150 return cls._requirement in repo.requirements
150 return cls._requirement in repo.requirements
151
151
152 @classmethod
152 @classmethod
153 def fromconfig(cls, repo):
153 def fromconfig(cls, repo):
154 assert cls._requirement is not None
154 assert cls._requirement is not None
155 return cls._requirement in cls._newreporequirements(repo.ui)
155 return cls._requirement in cls._newreporequirements(repo.ui)
156
156
157
157
158 @registerformatvariant
158 @registerformatvariant
159 class fncache(requirementformatvariant):
159 class fncache(requirementformatvariant):
160 name = b'fncache'
160 name = b'fncache'
161
161
162 _requirement = requirements.FNCACHE_REQUIREMENT
162 _requirement = requirements.FNCACHE_REQUIREMENT
163
163
164 default = True
164 default = True
165
165
166 description = _(
166 description = _(
167 b'long and reserved filenames may not work correctly; '
167 b'long and reserved filenames may not work correctly; '
168 b'repository performance is sub-optimal'
168 b'repository performance is sub-optimal'
169 )
169 )
170
170
171 upgrademessage = _(
171 upgrademessage = _(
172 b'repository will be more resilient to storing '
172 b'repository will be more resilient to storing '
173 b'certain paths and performance of certain '
173 b'certain paths and performance of certain '
174 b'operations should be improved'
174 b'operations should be improved'
175 )
175 )
176
176
177
177
178 @registerformatvariant
178 @registerformatvariant
179 class dirstatev2(requirementformatvariant):
179 class dirstatev2(requirementformatvariant):
180 name = b'dirstate-v2'
180 name = b'dirstate-v2'
181 _requirement = requirements.DIRSTATE_V2_REQUIREMENT
181 _requirement = requirements.DIRSTATE_V2_REQUIREMENT
182
182
183 default = False
183 default = False
184
184
185 description = _(
185 description = _(
186 b'version 1 of the dirstate file format requires '
186 b'version 1 of the dirstate file format requires '
187 b'reading and parsing it all at once.\n'
187 b'reading and parsing it all at once.\n'
188 b'Version 2 has a better structure, '
188 b'Version 2 has a better structure, '
189 b'better information and a lighter update mechanism'
189 b'better information and a lighter update mechanism'
190 )
190 )
191
191
192 upgrademessage = _(b'"hg status" will be faster')
192 upgrademessage = _(b'"hg status" will be faster')
193
193
194 touches_filelogs = False
194 touches_filelogs = False
195 touches_manifests = False
195 touches_manifests = False
196 touches_changelog = False
196 touches_changelog = False
197 touches_requirements = True
197 touches_requirements = True
198 touches_dirstate = True
198 touches_dirstate = True
199 compatible_with_share = True
199 compatible_with_share = True
200
200
201
201
202 @registerformatvariant
202 @registerformatvariant
203 class dirstatetrackedkey(requirementformatvariant):
203 class dirstatetrackedkey(requirementformatvariant):
204 name = b'tracked-hint'
204 name = b'tracked-hint'
205 _requirement = requirements.DIRSTATE_TRACKED_HINT_V1
205 _requirement = requirements.DIRSTATE_TRACKED_HINT_V1
206
206
207 default = False
207 default = False
208
208
209 description = _(
209 description = _(
210 b'Add a small file to help external tooling that watches the tracked set'
210 b'Add a small file to help external tooling that watches the tracked set'
211 )
211 )
212
212
213 upgrademessage = _(
213 upgrademessage = _(
214 b'external tools will be informed of potential changes in the tracked set'
214 b'external tools will be informed of potential changes in the tracked set'
215 )
215 )
216
216
217 touches_filelogs = False
217 touches_filelogs = False
218 touches_manifests = False
218 touches_manifests = False
219 touches_changelog = False
219 touches_changelog = False
220 touches_requirements = True
220 touches_requirements = True
221 touches_dirstate = True
221 touches_dirstate = True
222 compatible_with_share = True
222 compatible_with_share = True
223
223
224
224
225 @registerformatvariant
225 @registerformatvariant
226 class dotencode(requirementformatvariant):
226 class dotencode(requirementformatvariant):
227 name = b'dotencode'
227 name = b'dotencode'
228
228
229 _requirement = requirements.DOTENCODE_REQUIREMENT
229 _requirement = requirements.DOTENCODE_REQUIREMENT
230
230
231 default = True
231 default = True
232
232
233 description = _(
233 description = _(
234 b'storage of filenames beginning with a period or '
234 b'storage of filenames beginning with a period or '
235 b'space may not work correctly'
235 b'space may not work correctly'
236 )
236 )
237
237
238 upgrademessage = _(
238 upgrademessage = _(
239 b'repository will be better able to store files '
239 b'repository will be better able to store files '
240 b'beginning with a space or period'
240 b'beginning with a space or period'
241 )
241 )
242
242
243
243
244 @registerformatvariant
244 @registerformatvariant
245 class generaldelta(requirementformatvariant):
245 class generaldelta(requirementformatvariant):
246 name = b'generaldelta'
246 name = b'generaldelta'
247
247
248 _requirement = requirements.GENERALDELTA_REQUIREMENT
248 _requirement = requirements.GENERALDELTA_REQUIREMENT
249
249
250 default = True
250 default = True
251
251
252 description = _(
252 description = _(
253 b'deltas within internal storage are unable to '
253 b'deltas within internal storage are unable to '
254 b'choose optimal revisions; repository is larger and '
254 b'choose optimal revisions; repository is larger and '
255 b'slower than it could be; interaction with other '
255 b'slower than it could be; interaction with other '
256 b'repositories may require extra network and CPU '
256 b'repositories may require extra network and CPU '
257 b'resources, making "hg push" and "hg pull" slower'
257 b'resources, making "hg push" and "hg pull" slower'
258 )
258 )
259
259
260 upgrademessage = _(
260 upgrademessage = _(
261 b'repository storage will be able to create '
261 b'repository storage will be able to create '
262 b'optimal deltas; new repository data will be '
262 b'optimal deltas; new repository data will be '
263 b'smaller and read times should decrease; '
263 b'smaller and read times should decrease; '
264 b'interacting with other repositories using this '
264 b'interacting with other repositories using this '
265 b'storage model should require less network and '
265 b'storage model should require less network and '
266 b'CPU resources, making "hg push" and "hg pull" '
266 b'CPU resources, making "hg push" and "hg pull" '
267 b'faster'
267 b'faster'
268 )
268 )
269
269
270
270
271 @registerformatvariant
271 @registerformatvariant
272 class sharesafe(requirementformatvariant):
272 class sharesafe(requirementformatvariant):
273 name = b'share-safe'
273 name = b'share-safe'
274 _requirement = requirements.SHARESAFE_REQUIREMENT
274 _requirement = requirements.SHARESAFE_REQUIREMENT
275
275
276 default = True
276 default = True
277
277
278 description = _(
278 description = _(
279 b'old shared repositories do not share source repository '
279 b'old shared repositories do not share source repository '
280 b'requirements and config. This leads to various problems '
280 b'requirements and config. This leads to various problems '
281 b'when the source repository format is upgraded or some new '
281 b'when the source repository format is upgraded or some new '
282 b'extensions are enabled.'
282 b'extensions are enabled.'
283 )
283 )
284
284
285 upgrademessage = _(
285 upgrademessage = _(
286 b'Upgrades a repository to share-safe format so that future '
286 b'Upgrades a repository to share-safe format so that future '
287 b'shares of this repository share its requirements and configs.'
287 b'shares of this repository share its requirements and configs.'
288 )
288 )
289
289
290 postdowngrademessage = _(
290 postdowngrademessage = _(
291 b'repository downgraded to not use share safe mode, '
291 b'repository downgraded to not use share safe mode, '
292 b'existing shares will not work and needs to'
292 b'existing shares will not work and need to be reshared.'
293 b' be reshared.'
294 )
293 )
295
294
296 postupgrademessage = _(
295 postupgrademessage = _(
297 b'repository upgraded to share safe mode, existing'
296 b'repository upgraded to share safe mode, existing'
298 b' shares will still work in old non-safe mode. '
297 b' shares will still work in old non-safe mode. '
299 b'Re-share existing shares to use them in safe mode.'
298 b'Re-share existing shares to use them in safe mode.'
300 b' New shares will be created in safe mode.'
299 b' New shares will be created in safe mode.'
301 )
300 )
302
301
303 # upgrade only needs to change the requirements
302 # upgrade only needs to change the requirements
304 touches_filelogs = False
303 touches_filelogs = False
305 touches_manifests = False
304 touches_manifests = False
306 touches_changelog = False
305 touches_changelog = False
307 touches_requirements = True
306 touches_requirements = True
308
307
309
308
310 @registerformatvariant
309 @registerformatvariant
311 class sparserevlog(requirementformatvariant):
310 class sparserevlog(requirementformatvariant):
312 name = b'sparserevlog'
311 name = b'sparserevlog'
313
312
314 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
313 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
315
314
316 default = True
315 default = True
317
316
318 description = _(
317 description = _(
319 b'in order to limit disk reading and memory usage on older '
318 b'in order to limit disk reading and memory usage on older '
320 b'versions, the span of a delta chain from its root to its '
319 b'versions, the span of a delta chain from its root to its '
321 b'end is limited, regardless of the relevant data in this span. '
320 b'end is limited, regardless of the relevant data in this span. '
322 b'This can severely limit Mercurial\'s ability to build good '
321 b'This can severely limit Mercurial\'s ability to build good '
323 b'delta chains, resulting in much more storage space being '
322 b'delta chains, resulting in much more storage space being '
324 b'taken and limiting the reusability of on-disk deltas during '
323 b'taken and limiting the reusability of on-disk deltas during '
325 b'exchange.'
324 b'exchange.'
326 )
325 )
327
326
328 upgrademessage = _(
327 upgrademessage = _(
329 b'Revlog supports delta chains with more unused data '
328 b'Revlog supports delta chains with more unused data '
330 b'between payloads. These gaps will be skipped at read '
329 b'between payloads. These gaps will be skipped at read '
331 b'time. This allows for better delta chains, yielding '
330 b'time. This allows for better delta chains, yielding '
332 b'better compression and faster exchange with the server.'
331 b'better compression and faster exchange with the server.'
333 )
332 )
334
333
335
334
336 @registerformatvariant
335 @registerformatvariant
337 class persistentnodemap(requirementformatvariant):
336 class persistentnodemap(requirementformatvariant):
338 name = b'persistent-nodemap'
337 name = b'persistent-nodemap'
339
338
340 _requirement = requirements.NODEMAP_REQUIREMENT
339 _requirement = requirements.NODEMAP_REQUIREMENT
341
340
342 default = False
341 default = False
343
342
344 description = _(
343 description = _(
345 b'persist the node -> rev mapping on disk to speed up lookups'
344 b'persist the node -> rev mapping on disk to speed up lookups'
346 )
345 )
347
346
348 upgrademessage = _(b'Speed up revision lookups by node id.')
347 upgrademessage = _(b'Speed up revision lookups by node id.')
349
348
350
349
351 @registerformatvariant
350 @registerformatvariant
352 class copiessdc(requirementformatvariant):
351 class copiessdc(requirementformatvariant):
353 name = b'copies-sdc'
352 name = b'copies-sdc'
354
353
355 _requirement = requirements.COPIESSDC_REQUIREMENT
354 _requirement = requirements.COPIESSDC_REQUIREMENT
356
355
357 default = False
356 default = False
358
357
359 description = _(b'Stores copies information alongside changesets.')
358 description = _(b'Stores copies information alongside changesets.')
360
359
361 upgrademessage = _(
360 upgrademessage = _(
362 b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
361 b'Allows the use of a more efficient algorithm for copy tracing.'
363 )
362 )
364
363
365 touches_filelogs = False
364 touches_filelogs = False
366 touches_manifests = False
365 touches_manifests = False
367
366
368
367
369 @registerformatvariant
368 @registerformatvariant
370 class revlogv2(requirementformatvariant):
369 class revlogv2(requirementformatvariant):
371 name = b'revlog-v2'
370 name = b'revlog-v2'
372 _requirement = requirements.REVLOGV2_REQUIREMENT
371 _requirement = requirements.REVLOGV2_REQUIREMENT
373 default = False
372 default = False
374 description = _(b'Version 2 of the revlog.')
373 description = _(b'Version 2 of the revlog.')
375 upgrademessage = _(b'very experimental')
374 upgrademessage = _(b'very experimental')
376
375
377
376
378 @registerformatvariant
377 @registerformatvariant
379 class changelogv2(requirementformatvariant):
378 class changelogv2(requirementformatvariant):
380 name = b'changelog-v2'
379 name = b'changelog-v2'
381 _requirement = requirements.CHANGELOGV2_REQUIREMENT
380 _requirement = requirements.CHANGELOGV2_REQUIREMENT
382 default = False
381 default = False
383 description = _(b'An iteration of the revlog focused on changelog needs.')
382 description = _(b'An iteration of the revlog focused on changelog needs.')
384 upgrademessage = _(b'quite experimental')
383 upgrademessage = _(b'quite experimental')
385
384
386 touches_filelogs = False
385 touches_filelogs = False
387 touches_manifests = False
386 touches_manifests = False
388
387
389
388
390 @registerformatvariant
389 @registerformatvariant
391 class removecldeltachain(formatvariant):
390 class removecldeltachain(formatvariant):
392 name = b'plain-cl-delta'
391 name = b'plain-cl-delta'
393
392
394 default = True
393 default = True
395
394
396 description = _(
395 description = _(
397 b'changelog storage is using deltas instead of '
396 b'changelog storage is using deltas instead of '
398 b'raw entries; changelog reading and any '
397 b'raw entries; changelog reading and any '
399 b'operation relying on changelog data are slower '
398 b'operation relying on changelog data are slower '
400 b'than they could be'
399 b'than they could be'
401 )
400 )
402
401
403 upgrademessage = _(
402 upgrademessage = _(
404 b'changelog storage will be reformatted to '
403 b'changelog storage will be reformatted to '
405 b'store raw entries; changelog reading will be '
404 b'store raw entries; changelog reading will be '
406 b'faster; changelog size may be reduced'
405 b'faster; changelog size may be reduced'
407 )
406 )
408
407
409 @staticmethod
408 @staticmethod
410 def fromrepo(repo):
409 def fromrepo(repo):
411 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
410 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
412 # changelogs with deltas.
411 # changelogs with deltas.
413 cl = repo.changelog
412 cl = repo.changelog
414 chainbase = cl.chainbase
413 chainbase = cl.chainbase
415 return all(rev == chainbase(rev) for rev in cl)
414 return all(rev == chainbase(rev) for rev in cl)
416
415
417 @staticmethod
416 @staticmethod
418 def fromconfig(repo):
417 def fromconfig(repo):
419 return True
418 return True
420
419
421
420
422 _has_zstd = (
421 _has_zstd = (
423 b'zstd' in util.compengines
422 b'zstd' in util.compengines
424 and util.compengines[b'zstd'].available()
423 and util.compengines[b'zstd'].available()
425 and util.compengines[b'zstd'].revlogheader()
424 and util.compengines[b'zstd'].revlogheader()
426 )
425 )
427
426
428
427
429 @registerformatvariant
428 @registerformatvariant
430 class compressionengine(formatvariant):
429 class compressionengine(formatvariant):
431 name = b'compression'
430 name = b'compression'
432
431
433 if _has_zstd:
432 if _has_zstd:
434 default = b'zstd'
433 default = b'zstd'
435 else:
434 else:
436 default = b'zlib'
435 default = b'zlib'
437
436
438 description = _(
437 description = _(
439 b'Compression algorithm used to compress data. '
438 b'Compression algorithm used to compress data. '
440 b'Some engines are faster than others'
439 b'Some engines are faster than others'
441 )
440 )
442
441
443 upgrademessage = _(
442 upgrademessage = _(
444 b'revlog content will be recompressed with the new algorithm.'
443 b'revlog content will be recompressed with the new algorithm.'
445 )
444 )
446
445
447 @classmethod
446 @classmethod
448 def fromrepo(cls, repo):
447 def fromrepo(cls, repo):
449 # we allow multiple compression engine requirements to co-exist because,
448 # we allow multiple compression engine requirements to co-exist because,
450 # strictly speaking, revlogs seem to support mixed compression styles.
449 # strictly speaking, revlogs seem to support mixed compression styles.
451 #
450 #
452 # The compression used for new entries will be "the last one"
451 # The compression used for new entries will be "the last one"
453 compression = b'zlib'
452 compression = b'zlib'
454 for req in repo.requirements:
453 for req in repo.requirements:
455 prefix = req.startswith
454 prefix = req.startswith
456 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
455 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
457 compression = req.split(b'-', 2)[2]
456 compression = req.split(b'-', 2)[2]
458 return compression
457 return compression
459
458
460 @classmethod
459 @classmethod
461 def fromconfig(cls, repo):
460 def fromconfig(cls, repo):
462 compengines = repo.ui.configlist(b'format', b'revlog-compression')
461 compengines = repo.ui.configlist(b'format', b'revlog-compression')
463 # return the first valid value as the selection code would do
462 # return the first valid value as the selection code would do
464 for comp in compengines:
463 for comp in compengines:
465 if comp in util.compengines:
464 if comp in util.compengines:
466 e = util.compengines[comp]
465 e = util.compengines[comp]
467 if e.available() and e.revlogheader():
466 if e.available() and e.revlogheader():
468 return comp
467 return comp
469
468
470 # no valide compression found lets display it all for clarity
469 # no valide compression found lets display it all for clarity
471 return b','.join(compengines)
470 return b','.join(compengines)
472
471
473
472
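
# --- Illustrative aside (not part of the original module) -----------------
# A minimal, hedged sketch of the requirement-string parsing used in
# ``compressionengine.fromrepo`` above; the helper name and sample value are
# hypothetical, chosen only to show why ``maxsplit=2`` matters.
def _example_engine_from_requirement(req=b'revlog-compression-zstd'):
    # maxsplit=2 keeps engine names that contain dashes intact, e.g.
    # b'exp-compression-some-engine' -> b'some-engine'
    return req.split(b'-', 2)[2]
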
@registerformatvariant
class compressionlevel(formatvariant):
    name = b'compression-level'
    default = b'default'

    description = _(b'compression level')

    upgrademessage = _(b'revlog content will be recompressed')

    @classmethod
    def fromrepo(cls, repo):
        comp = compressionengine.fromrepo(repo)
        level = None
        if comp == b'zlib':
            level = repo.ui.configint(b'storage', b'revlog.zlib.level')
        elif comp == b'zstd':
            level = repo.ui.configint(b'storage', b'revlog.zstd.level')
        if level is None:
            return b'default'
        return bytes(level)

    @classmethod
    def fromconfig(cls, repo):
        comp = compressionengine.fromconfig(repo)
        level = None
        if comp == b'zlib':
            level = repo.ui.configint(b'storage', b'revlog.zlib.level')
        elif comp == b'zstd':
            level = repo.ui.configint(b'storage', b'revlog.zstd.level')
        if level is None:
            return b'default'
        return bytes(level)
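
# --- Illustrative aside (not part of the original module) -----------------
# A hedged, dependency-free sketch of the level-resolution logic above: an
# unset config knob yields None, which is reported as b'default'. The
# function name and its argument are hypothetical stand-ins.
def _example_level_label(configured_level=None):
    # configint-style lookups return None when the option is unset
    if configured_level is None:
        return b'default'
    return b'%d' % configured_level
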
def find_format_upgrades(repo):
    """returns a list of format upgrades which can be performed on the repo"""
    upgrades = []

    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    for fv in allformatvariant:
        if not fv.fromrepo(repo):
            upgrades.append(fv)

    return upgrades


def find_format_downgrades(repo):
    """returns a list of format downgrades which will be performed on the repo
    because their config options are disabled"""

    downgrades = []

    for fv in allformatvariant:
        if fv.name == b'compression':
            # If the compression used by the repository differs from the one
            # in the config, the destination repository's compression will
            # change and the current compression will be removed.
            if fv.fromrepo(repo) != fv.fromconfig(repo):
                downgrades.append(fv)
            continue
        # format variant exists in the repo but is absent from the new
        # repository's config
        if fv.fromrepo(repo) and not fv.fromconfig(repo):
            downgrades.append(fv)

    return downgrades
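
# --- Illustrative aside (not part of the original module) -----------------
# A hedged sketch of how the two helpers above are typically combined by
# calling code; kept as a comment because ``some_repo`` is a placeholder for
# a real repository object.
#
#     upgrades = find_format_upgrades(some_repo)
#     downgrades = find_format_downgrades(some_repo)
#     names = sorted(fv.name for fv in upgrades + downgrades)
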
ALL_OPTIMISATIONS = []


def register_optimization(obj):
    ALL_OPTIMISATIONS.append(obj)
    return obj


class optimization(improvement):
    """an improvement subclass dedicated to optimizations"""

    type = OPTIMISATION


@register_optimization
class redeltaparents(optimization):
    name = b're-delta-parent'

    type = OPTIMISATION

    description = _(
        b'deltas within internal storage will be recalculated to '
        b'choose an optimal base revision where this was not '
        b'already done; the size of the repository may shrink and '
        b'various operations may become faster; the first time '
        b'this optimization is performed could slow down upgrade '
        b'execution considerably; subsequent invocations should '
        b'not run noticeably slower'
    )

    upgrademessage = _(
        b'deltas within internal storage will choose a new '
        b'base revision if needed'
    )


@register_optimization
class redeltamultibase(optimization):
    name = b're-delta-multibase'

    type = OPTIMISATION

    description = _(
        b'deltas within internal storage will be recalculated '
        b'against multiple base revisions and the smallest '
        b'difference will be used; the size of the repository may '
        b'shrink significantly when there are many merges; this '
        b'optimization will slow down execution in proportion to '
        b'the number of merges in the repository and the number '
        b'of files in the repository; this slowdown should not '
        b'be significant unless there are tens of thousands of '
        b'files and thousands of merges'
    )

    upgrademessage = _(
        b'deltas within internal storage will choose an '
        b'optimal delta by computing deltas against multiple '
        b'parents; may slow down execution time '
        b'significantly'
    )


@register_optimization
class redeltaall(optimization):
    name = b're-delta-all'

    type = OPTIMISATION

    description = _(
        b'deltas within internal storage will always be '
        b'recalculated without reusing prior deltas; this will '
        b'likely make execution run several times slower; this '
        b'optimization is typically not needed'
    )

    upgrademessage = _(
        b'deltas within internal storage will be fully '
        b'recomputed; this will likely drastically slow down '
        b'execution time'
    )


@register_optimization
class redeltafulladd(optimization):
    name = b're-delta-fulladd'

    type = OPTIMISATION

    description = _(
        b'every revision will be re-added as if it were new '
        b'content. It will go through the full storage '
        b'mechanism, giving extensions a chance to process it '
        b'(eg. lfs). This is similar to "re-delta-all" but even '
        b'slower since more logic is involved.'
    )

    upgrademessage = _(
        b'each revision will be added as new content to the '
        b'internal storage; this will likely drastically slow '
        b'down execution time, but some extensions might need '
        b'it'
    )
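
# --- Illustrative aside (not part of the original module) -----------------
# A hedged sketch of how an extension might register its own optimization
# via the decorator above; kept commented out so the real registry is not
# polluted. The class name and strings are hypothetical.
#
#     @register_optimization
#     class exampleoptimization(optimization):
#         name = b'example-no-op'
#         description = _(b'placeholder used purely for illustration')
#         upgrademessage = _(b'nothing would actually change')
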

def findoptimizations(repo):
    """Determine the optimisations that could be used during upgrade"""
    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.
    return list(ALL_OPTIMISATIONS)

def determine_upgrade_actions(
    repo, format_upgrades, optimizations, sourcereqs, destreqs
):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``find_format_upgrades`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    newactions = []

    for d in format_upgrades:
        if hasattr(d, '_requirement'):
            name = d._requirement
        else:
            name = None

        # If the action is a requirement that doesn't show up in the
        # destination requirements, prune the action.
        if name is not None and name not in destreqs:
            continue

        newactions.append(d)

    newactions.extend(
        o
        for o in sorted(optimizations, key=(lambda x: x.name))
        if o not in newactions
    )

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions
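
# --- Illustrative aside (not part of the original module) -----------------
# A hedged, dependency-free sketch of the pruning rule above: an upgrade
# tied to a requirement is kept only if that requirement appears in the
# destination requirement set. All names below are hypothetical.
def _example_prune(candidate_requirements, destreqs):
    return [r for r in candidate_requirements if r in destreqs]


# e.g. _example_prune([b'generaldelta', b'largefiles'], {b'generaldelta'})
# keeps only b'generaldelta'.
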

class BaseOperation:
    """base class that contains the minimum for an upgrade to work

    (this might need to be extended as the usage of subclass alternatives to
    UpgradeOperation grows)
    """

    def __init__(
        self,
        new_requirements,
        backup_store,
    ):
        self.new_requirements = new_requirements
        # should this operation create a backup of the store
        self.backup_store = backup_store

class UpgradeOperation(BaseOperation):
    """represent the work to be done during an upgrade"""

    def __init__(
        self,
        ui,
        new_requirements,
        current_requirements,
        upgrade_actions,
        removed_actions,
        revlogs_to_process,
        backup_store,
    ):
        super().__init__(
            new_requirements,
            backup_store,
        )
        self.ui = ui
        self.current_requirements = current_requirements
        # list of upgrade actions the operation will perform
        self.upgrade_actions = upgrade_actions
        self.removed_actions = removed_actions
        self.revlogs_to_process = revlogs_to_process
        # requirements which will be added by the operation
        self._added_requirements = (
            self.new_requirements - self.current_requirements
        )
        # requirements which will be removed by the operation
        self._removed_requirements = (
            self.current_requirements - self.new_requirements
        )
        # requirements which will be preserved by the operation
        self._preserved_requirements = (
            self.current_requirements & self.new_requirements
        )
        # optimizations which are not selected and which we may want to
        # recommend to the user
        all_optimizations = findoptimizations(None)
        self.unused_optimizations = [
            i for i in all_optimizations if i not in self.upgrade_actions
        ]

        # delta reuse mode of this upgrade operation
        upgrade_actions_names = self.upgrade_actions_names
        self.delta_reuse_mode = revlog.revlog.DELTAREUSEALWAYS
        if b're-delta-all' in upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSENEVER
        elif b're-delta-parent' in upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
        elif b're-delta-multibase' in upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
        elif b're-delta-fulladd' in upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSEFULLADD

        # should this operation force re-delta of both parents
        self.force_re_delta_both_parents = (
            b're-delta-multibase' in upgrade_actions_names
        )

    @property
    def upgrade_actions_names(self):
        return set([a.name for a in self.upgrade_actions])

    @property
    def requirements_only(self):
        # does the operation only touch repository requirements?
        return (
            self.touches_requirements
            and not self.touches_filelogs
            and not self.touches_manifests
            and not self.touches_changelog
            and not self.touches_dirstate
        )

    @property
    def touches_filelogs(self):
        for a in self.upgrade_actions:
            # in optimisations, we re-process the revlogs again
            if a.type == OPTIMISATION:
                return True
            elif a.touches_filelogs:
                return True
        for a in self.removed_actions:
            if a.touches_filelogs:
                return True
        return False

    @property
    def touches_manifests(self):
        for a in self.upgrade_actions:
            # in optimisations, we re-process the revlogs again
            if a.type == OPTIMISATION:
                return True
            elif a.touches_manifests:
                return True
        for a in self.removed_actions:
            if a.touches_manifests:
                return True
        return False

    @property
    def touches_changelog(self):
        for a in self.upgrade_actions:
            # in optimisations, we re-process the revlogs again
            if a.type == OPTIMISATION:
                return True
            elif a.touches_changelog:
                return True
        for a in self.removed_actions:
            if a.touches_changelog:
                return True
        return False

    @property
    def touches_requirements(self):
        for a in self.upgrade_actions:
            # optimisations are used to re-process revlogs and do not result
            # in a requirement being added or removed
            if a.type == OPTIMISATION:
                pass
            elif a.touches_requirements:
                return True
        for a in self.removed_actions:
            if a.touches_requirements:
                return True
        return False

    @property
    def touches_dirstate(self):
        for a in self.upgrade_actions:
            # revlog optimisations do not affect the dirstate
            if a.type == OPTIMISATION:
                pass
            elif a.touches_dirstate:
                return True
        for a in self.removed_actions:
            if a.touches_dirstate:
                return True

        return False
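
    # --- Illustrative aside (not part of the original class) ---------------
    # A hedged sketch of the delta-reuse precedence encoded in __init__
    # above: re-delta-all wins over re-delta-parent/multibase, which win
    # over re-delta-fulladd; with no re-delta action, existing deltas are
    # reused. Shown with plain strings so it can run stand-alone.
    #
    #     def example_reuse_mode(action_names):
    #         if 're-delta-all' in action_names:
    #             return 'DELTAREUSENEVER'
    #         if {'re-delta-parent', 're-delta-multibase'} & action_names:
    #             return 'DELTAREUSESAMEREVS'
    #         if 're-delta-fulladd' in action_names:
    #             return 'DELTAREUSEFULLADD'
    #         return 'DELTAREUSEALWAYS'
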
    def _write_labeled(self, l, label: bytes):
        """
        Utility function to aid writing of a list under one label
        """
        first = True
        for r in sorted(l):
            if not first:
                self.ui.write(b', ')
            self.ui.write(r, label=label)
            first = False

    def print_requirements(self):
        self.ui.write(_(b'requirements\n'))
        self.ui.write(_(b'   preserved: '))
        self._write_labeled(
            self._preserved_requirements, b"upgrade-repo.requirement.preserved"
        )
        self.ui.write((b'\n'))
        if self._removed_requirements:
            self.ui.write(_(b'   removed: '))
            self._write_labeled(
                self._removed_requirements, b"upgrade-repo.requirement.removed"
            )
            self.ui.write((b'\n'))
        if self._added_requirements:
            self.ui.write(_(b'   added: '))
            self._write_labeled(
                self._added_requirements, b"upgrade-repo.requirement.added"
            )
            self.ui.write((b'\n'))
        self.ui.write(b'\n')

    def print_optimisations(self):
        optimisations = [
            a for a in self.upgrade_actions if a.type == OPTIMISATION
        ]
        optimisations.sort(key=lambda a: a.name)
        if optimisations:
            self.ui.write(_(b'optimisations: '))
            self._write_labeled(
                [a.name for a in optimisations],
                b"upgrade-repo.optimisation.performed",
            )
            self.ui.write(b'\n\n')

    def print_upgrade_actions(self):
        for a in self.upgrade_actions:
            self.ui.status(b'%s\n   %s\n\n' % (a.name, a.upgrademessage))

    def print_affected_revlogs(self):
        if not self.revlogs_to_process:
            self.ui.write((b'no revlogs to process\n'))
        else:
            self.ui.write((b'processed revlogs:\n'))
            for r in sorted(self.revlogs_to_process):
                self.ui.write((b'  - %s\n' % r))
        self.ui.write((b'\n'))

    def print_unused_optimizations(self):
        for i in self.unused_optimizations:
            self.ui.status(_(b'%s\n   %s\n\n') % (i.name, i.description))

    def has_upgrade_action(self, name):
        """Check whether the upgrade operation will perform this action"""
        return name in self.upgrade_actions_names

    def print_post_op_messages(self):
        """print post upgrade operation warning messages"""
        for a in self.upgrade_actions:
            if a.postupgrademessage is not None:
                self.ui.warn(b'%s\n' % a.postupgrademessage)
        for a in self.removed_actions:
            if a.postdowngrademessage is not None:
                self.ui.warn(b'%s\n' % a.postdowngrademessage)

### Code checking if a repository can go through the upgrade process at all. #


def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    return {
        # Introduced in Mercurial 0.9.2.
        requirements.STORE_REQUIREMENT,
    }


def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirement in the returned set.
    """
    return {
        # This was a precursor to generaldelta and was never enabled by
        # default. It should (hopefully) not exist in the wild.
        b'parentdelta',
    }

def check_revlog_version(reqs):
    """Check that the requirements contain at least one Revlog version"""
    all_revlogs = {
        requirements.REVLOGV1_REQUIREMENT,
        requirements.REVLOGV2_REQUIREMENT,
    }
    if not all_revlogs.intersection(reqs):
        msg = _(b'cannot upgrade repository; missing a revlog version')
        raise error.Abort(msg)


def check_source_requirements(repo):
    """Ensure that no existing requirements prevent the repository upgrade"""

    check_revlog_version(repo.requirements)
    required = requiredsourcerequirements(repo)
    missingreqs = required - repo.requirements
    if missingreqs:
        msg = _(b'cannot upgrade repository; requirement missing: %s')
        missingreqs = b', '.join(sorted(missingreqs))
        raise error.Abort(msg % missingreqs)

    blocking = blocksourcerequirements(repo)
    blockingreqs = blocking & repo.requirements
    if blockingreqs:
        m = _(b'cannot upgrade repository; unsupported source requirement: %s')
        blockingreqs = b', '.join(sorted(blockingreqs))
        raise error.Abort(m % blockingreqs)
    # Upgrade should operate on the actual store, not the shared link.

    bad_share = (
        requirements.SHARED_REQUIREMENT in repo.requirements
        and requirements.SHARESAFE_REQUIREMENT not in repo.requirements
    )
    if bad_share:
        m = _(b'cannot upgrade repository; share repository without share-safe')
        h = _(b'check :hg:`help config.format.use-share-safe`')
        raise error.Abort(m, hint=h)

### Verify the validity of the planned requirement changes ####################


def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    supported = {
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
        requirements.REVLOGV2_REQUIREMENT,
        requirements.CHANGELOGV2_REQUIREMENT,
        requirements.REVLOGV1_REQUIREMENT,
        requirements.DIRSTATE_TRACKED_HINT_V1,
        requirements.DIRSTATE_V2_REQUIREMENT,
    }
    for name in compression.compengines:
        engine = compression.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')
    return supported


def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would have requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {
        requirements.CHANGELOGV2_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.DIRSTATE_TRACKED_HINT_V1,
        requirements.DIRSTATE_V2_REQUIREMENT,
        requirements.DOTENCODE_REQUIREMENT,
        requirements.FNCACHE_REQUIREMENT,
        requirements.GENERALDELTA_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.REVLOGV1_REQUIREMENT,  # allowed in case of downgrade
        requirements.REVLOGV2_REQUIREMENT,
        requirements.SHARED_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.STORE_REQUIREMENT,
        requirements.TREEMANIFEST_REQUIREMENT,
        requirements.NARROW_REQUIREMENT,
    }
    for name in compression.compengines:
        engine = compression.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')
    return supported

def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    supported = {
        requirements.DOTENCODE_REQUIREMENT,
        requirements.FNCACHE_REQUIREMENT,
        requirements.GENERALDELTA_REQUIREMENT,
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
        requirements.REVLOGV1_REQUIREMENT,
        requirements.REVLOGV2_REQUIREMENT,
        requirements.CHANGELOGV2_REQUIREMENT,
        requirements.DIRSTATE_TRACKED_HINT_V1,
        requirements.DIRSTATE_V2_REQUIREMENT,
    }
    for name in compression.compengines:
        engine = compression.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')
    return supported

def check_requirements_changes(repo, new_reqs):
    old_reqs = repo.requirements
    check_revlog_version(repo.requirements)
    support_removal = supportremovedrequirements(repo)
    no_remove_reqs = old_reqs - new_reqs - support_removal
    if no_remove_reqs:
        msg = _(b'cannot upgrade repository; requirement would be removed: %s')
        no_remove_reqs = b', '.join(sorted(no_remove_reqs))
        raise error.Abort(msg % no_remove_reqs)

    support_addition = allowednewrequirements(repo)
    no_add_reqs = new_reqs - old_reqs - support_addition
    if no_add_reqs:
        m = _(b'cannot upgrade repository; do not support adding requirement: ')
        no_add_reqs = b', '.join(sorted(no_add_reqs))
        raise error.Abort(m + no_add_reqs)

    supported = supporteddestrequirements(repo)
    unsupported_reqs = new_reqs - supported
    if unsupported_reqs:
        msg = _(
            b'cannot upgrade repository; do not support destination '
            b'requirement: %s'
        )
        unsupported_reqs = b', '.join(sorted(unsupported_reqs))
        raise error.Abort(msg % unsupported_reqs)
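

# --- Illustrative aside (not part of the original module) -----------------
# A hedged, self-contained sketch of the set arithmetic used above, with
# made-up requirement names rather than real ones.
def _example_requirement_delta(old_reqs, new_reqs, removable, addable):
    blocked_removals = old_reqs - new_reqs - removable
    blocked_additions = new_reqs - old_reqs - addable
    return blocked_removals, blocked_additions


# e.g. _example_requirement_delta({b'a', b'b'}, {b'b', b'c'},
#                                 removable={b'a'}, addable=set())
# returns (set(), {b'c'}): dropping b'a' is allowed, adding b'c' is not.
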
@@ -1,951 +1,965 b''
// Copyright 2018-2023 Georges Racinet <georges.racinet@octobus.net>
//           and Mercurial contributors
//
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
//! Mercurial concepts for handling revision history

pub mod node;
pub mod nodemap;
mod nodemap_docket;
pub mod path_encode;
pub use node::{FromHexError, Node, NodePrefix};
pub mod changelog;
pub mod filelog;
pub mod index;
pub mod manifest;
pub mod patch;

use std::borrow::Cow;
use std::io::Read;
use std::ops::Deref;
use std::path::Path;

use flate2::read::ZlibDecoder;
use sha1::{Digest, Sha1};
use std::cell::RefCell;
use zstd;

use self::node::{NODE_BYTES_LENGTH, NULL_NODE};
use self::nodemap_docket::NodeMapDocket;
use super::index::Index;
use super::nodemap::{NodeMap, NodeMapError};
use crate::errors::HgError;
use crate::vfs::Vfs;

/// As noted in revlog.c, revision numbers are actually encoded in
/// 4 bytes, and are liberally converted to ints, whence the i32
pub type BaseRevision = i32;

/// Mercurial revision numbers
/// In contrast to the more general [`UncheckedRevision`], these are "checked"
/// in the sense that they should only be used for revisions that are
/// valid for a given index (i.e. in bounds).
#[derive(
    Debug,
    derive_more::Display,
    Clone,
    Copy,
    Hash,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
)]
pub struct Revision(pub BaseRevision);

impl format_bytes::DisplayBytes for Revision {
    fn display_bytes(
        &self,
        output: &mut dyn std::io::Write,
    ) -> std::io::Result<()> {
        self.0.display_bytes(output)
    }
}

/// Unchecked Mercurial revision numbers.
///
/// Values of this type have no guarantee of being a valid revision number
/// in any context. Use method `check_revision` to get a valid revision within
/// the appropriate index object.
#[derive(
    Debug,
    derive_more::Display,
    Clone,
    Copy,
    Hash,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
)]
pub struct UncheckedRevision(pub BaseRevision);

impl format_bytes::DisplayBytes for UncheckedRevision {
    fn display_bytes(
        &self,
        output: &mut dyn std::io::Write,
    ) -> std::io::Result<()> {
        self.0.display_bytes(output)
    }
}

impl From<Revision> for UncheckedRevision {
    fn from(value: Revision) -> Self {
        Self(value.0)
    }
}

impl From<BaseRevision> for UncheckedRevision {
    fn from(value: BaseRevision) -> Self {
        Self(value)
    }
}

/// Marker expressing the absence of a parent
///
/// Independently of the actual representation, `NULL_REVISION` is guaranteed
/// to be smaller than all existing revisions.
pub const NULL_REVISION: Revision = Revision(-1);

/// Same as `mercurial.node.wdirrev`
///
/// This is also equal to `i32::max_value()`, but it's better to spell
/// it out explicitly, same as in `mercurial.node`
#[allow(clippy::unreadable_literal)]
pub const WORKING_DIRECTORY_REVISION: UncheckedRevision =
    UncheckedRevision(0x7fffffff);

pub const WORKING_DIRECTORY_HEX: &str =
    "ffffffffffffffffffffffffffffffffffffffff";

/// The simplest expression of what we need of Mercurial DAGs.
pub trait Graph {
    /// Return the two parents of the given `Revision`.
    ///
    /// Each of the parents can be independently `NULL_REVISION`
    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError>;
}

#[derive(Clone, Debug, PartialEq)]
pub enum GraphError {
    ParentOutOfRange(Revision),
}

/// The Mercurial Revlog Index
///
/// This is currently limited to the minimal interface that is needed for
/// the [`nodemap`](nodemap/index.html) module
pub trait RevlogIndex {
    /// Total number of Revisions referenced in this index
    fn len(&self) -> usize;

    fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Return a reference to the Node or `None` for `NULL_REVISION`
    fn node(&self, rev: Revision) -> Option<&Node>;

    /// Return a [`Revision`] if `rev` is a valid revision number for this
    /// index
    fn check_revision(&self, rev: UncheckedRevision) -> Option<Revision> {
        let rev = rev.0;

        if rev == NULL_REVISION.0 || (rev >= 0 && (rev as usize) < self.len())
        {
            Some(Revision(rev))
        } else {
            None
        }
    }
}

const REVISION_FLAG_CENSORED: u16 = 1 << 15;
const REVISION_FLAG_ELLIPSIS: u16 = 1 << 14;
const REVISION_FLAG_EXTSTORED: u16 = 1 << 13;
const REVISION_FLAG_HASCOPIESINFO: u16 = 1 << 12;

// Keep this in sync with REVIDX_KNOWN_FLAGS in
// mercurial/revlogutils/flagutil.py
const REVIDX_KNOWN_FLAGS: u16 = REVISION_FLAG_CENSORED
    | REVISION_FLAG_ELLIPSIS
    | REVISION_FLAG_EXTSTORED
    | REVISION_FLAG_HASCOPIESINFO;

const NULL_REVLOG_ENTRY_FLAGS: u16 = 0;
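
// --- Illustrative aside (not part of the original file) -------------------
// A hedged sketch of the `check_revision` semantics from the trait above,
// exercised through a toy index that only tracks its length. The module and
// type names are hypothetical.
#[cfg(test)]
mod example_check_revision {
    use super::*;

    struct FixedLenIndex(usize);

    impl RevlogIndex for FixedLenIndex {
        fn len(&self) -> usize {
            self.0
        }
        fn node(&self, _rev: Revision) -> Option<&Node> {
            None // this toy index stores no node IDs
        }
    }

    #[test]
    fn null_and_in_bounds_revisions_are_accepted() {
        let index = FixedLenIndex(3);
        assert_eq!(index.check_revision(2.into()), Some(Revision(2)));
        assert_eq!(index.check_revision((-1).into()), Some(NULL_REVISION));
        assert_eq!(index.check_revision(3.into()), None);
    }
}
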
#[derive(Debug, derive_more::From, derive_more::Display)]
pub enum RevlogError {
    InvalidRevision,
    /// Working directory is not supported
    WDirUnsupported,
    /// Found more than one entry whose ID matches the requested prefix
    AmbiguousPrefix,
    #[from]
    Other(HgError),
}

impl From<NodeMapError> for RevlogError {
    fn from(error: NodeMapError) -> Self {
        match error {
            NodeMapError::MultipleResults => RevlogError::AmbiguousPrefix,
            NodeMapError::RevisionNotInIndex(rev) => RevlogError::corrupted(
                format!("nodemap points to revision {} not in index", rev),
            ),
        }
    }
}

fn corrupted<S: AsRef<str>>(context: S) -> HgError {
    HgError::corrupted(format!("corrupted revlog, {}", context.as_ref()))
}

impl RevlogError {
    fn corrupted<S: AsRef<str>>(context: S) -> Self {
        RevlogError::Other(corrupted(context))
    }
}

/// Read only implementation of revlog.
pub struct Revlog {
    /// When index and data are not interleaved: bytes of the revlog index.
    /// When index and data are interleaved: bytes of the revlog index and
    /// data.
    index: Index,
    /// When index and data are not interleaved: bytes of the revlog data
    data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
    /// When present on disk: the persistent nodemap for this revlog
    nodemap: Option<nodemap::NodeTree>,
}

impl Graph for Revlog {
    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
        self.index.parents(rev)
    }
}
228 impl Revlog {
228 impl Revlog {
229 /// Open a revlog index file.
229 /// Open a revlog index file.
230 ///
230 ///
231 /// It will also open the associated data file if index and data are not
231 /// It will also open the associated data file if index and data are not
232 /// interleaved.
232 /// interleaved.
233 pub fn open(
233 pub fn open(
234 store_vfs: &Vfs,
234 store_vfs: &Vfs,
235 index_path: impl AsRef<Path>,
235 index_path: impl AsRef<Path>,
236 data_path: Option<&Path>,
236 data_path: Option<&Path>,
237 use_nodemap: bool,
237 use_nodemap: bool,
238 ) -> Result<Self, HgError> {
238 ) -> Result<Self, HgError> {
239 Self::open_gen(store_vfs, index_path, data_path, use_nodemap, None)
240 }
241
242 fn open_gen(
243 store_vfs: &Vfs,
244 index_path: impl AsRef<Path>,
245 data_path: Option<&Path>,
246 use_nodemap: bool,
247 nodemap_for_test: Option<nodemap::NodeTree>,
248 ) -> Result<Self, HgError> {
239 let index_path = index_path.as_ref();
249 let index_path = index_path.as_ref();
240 let index = {
250 let index = {
241 match store_vfs.mmap_open_opt(&index_path)? {
251 match store_vfs.mmap_open_opt(&index_path)? {
242 None => Index::new(Box::new(vec![])),
252 None => Index::new(Box::new(vec![])),
243 Some(index_mmap) => {
253 Some(index_mmap) => {
244 let index = Index::new(Box::new(index_mmap))?;
254 let index = Index::new(Box::new(index_mmap))?;
245 Ok(index)
255 Ok(index)
246 }
256 }
247 }
257 }
248 }?;
258 }?;
249
259
250 let default_data_path = index_path.with_extension("d");
260 let default_data_path = index_path.with_extension("d");
251
261
252 // type annotation required
262 // type annotation required
253 // won't recognize Mmap as Deref<Target = [u8]>
263 // won't recognize Mmap as Deref<Target = [u8]>
254 let data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>> =
264 let data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>> =
255 if index.is_inline() {
265 if index.is_inline() {
256 None
266 None
257 } else {
267 } else {
258 let data_path = data_path.unwrap_or(&default_data_path);
268 let data_path = data_path.unwrap_or(&default_data_path);
259 let data_mmap = store_vfs.mmap_open(data_path)?;
269 let data_mmap = store_vfs.mmap_open(data_path)?;
260 Some(Box::new(data_mmap))
270 Some(Box::new(data_mmap))
261 };
271 };
262
272
263 let nodemap = if index.is_inline() || !use_nodemap {
273 let nodemap = if index.is_inline() || !use_nodemap {
264 None
274 None
265 } else {
275 } else {
266 NodeMapDocket::read_from_file(store_vfs, index_path)?.map(
276 NodeMapDocket::read_from_file(store_vfs, index_path)?.map(
267 |(docket, data)| {
277 |(docket, data)| {
268 nodemap::NodeTree::load_bytes(
278 nodemap::NodeTree::load_bytes(
269 Box::new(data),
279 Box::new(data),
270 docket.data_length,
280 docket.data_length,
271 )
281 )
272 },
282 },
273 )
283 )
274 };
284 };
275
285
286 let nodemap = nodemap_for_test.or(nodemap);
287
276 Ok(Revlog {
288 Ok(Revlog {
277 index,
289 index,
278 data_bytes,
290 data_bytes,
279 nodemap,
291 nodemap,
280 })
292 })
281 }
293 }
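
    // Illustrative usage sketch (not part of this change): opening a revlog
    // through a `Vfs` rooted at a store directory and reading a revision.
    // The store path and file name below are assumptions.
    //
    //     let vfs = Vfs { base: Path::new("/repo/.hg/store") };
    //     let changelog = Revlog::open(&vfs, "00changelog.i", None, true)?;
    //     // `get_rev_data` resolves the whole delta chain transparently.
    //     let first_rev_data = changelog.get_rev_data(0.into())?;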

    /// Return number of entries of the `Revlog`.
    pub fn len(&self) -> usize {
        self.index.len()
    }

    /// Returns `true` if the `Revlog` has zero `entries`.
    pub fn is_empty(&self) -> bool {
        self.index.is_empty()
    }

    /// Returns the node ID for the given revision number, if it exists in
    /// this revlog
    pub fn node_from_rev(&self, rev: UncheckedRevision) -> Option<&Node> {
        if rev == NULL_REVISION.into() {
            return Some(&NULL_NODE);
        }
        let rev = self.index.check_revision(rev)?;
        Some(self.index.get_entry(rev)?.hash())
    }

    /// Return the revision number for the given node ID, if it exists in
    /// this revlog
    pub fn rev_from_node(
        &self,
        node: NodePrefix,
    ) -> Result<Revision, RevlogError> {
-        let looked_up = if let Some(nodemap) = &self.nodemap {
+        if let Some(nodemap) = &self.nodemap {
            nodemap
                .find_bin(&self.index, node)?
                .ok_or(RevlogError::InvalidRevision)
        } else {
            self.rev_from_node_no_persistent_nodemap(node)
-        };
-
-        if node.is_prefix_of(&NULL_NODE) {
-            return match looked_up {
-                Ok(_) => Err(RevlogError::AmbiguousPrefix),
-                Err(RevlogError::InvalidRevision) => Ok(NULL_REVISION),
-                res => res,
-            };
-        };
-
-        looked_up
+        }
    }
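
    // A sketch of the lookup semantics, mirroring the assertions in
    // `test_nodemap` below (the hex prefixes are only examples):
    //
    //     revlog.rev_from_node(NodePrefix::from_hex("b00")?)
    //     // -> Ok(rev) when exactly one stored node starts with the prefix
    //     // -> Err(RevlogError::AmbiguousPrefix) when several nodes match,
    //     //    including the all-zeros NULL_NODE for prefixes such as "00"
    //     // -> Err(RevlogError::InvalidRevision) when nothing matches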

    /// Same as `rev_from_node`, without using a persistent nodemap
    ///
    /// This is used as fallback when a persistent nodemap is not present.
    /// This happens when the persistent-nodemap experimental feature is not
    /// enabled, or for small revlogs.
    fn rev_from_node_no_persistent_nodemap(
        &self,
        node: NodePrefix,
    ) -> Result<Revision, RevlogError> {
        // Linear scan of the revlog
        // TODO: consider building a non-persistent nodemap in memory to
        // optimize these cases.
        let mut found_by_prefix = None;
-        for rev in (0..self.len()).rev() {
+        for rev in (-1..self.len() as BaseRevision).rev() {
            let rev = Revision(rev as BaseRevision);
-            let index_entry = self.index.get_entry(rev).ok_or_else(|| {
-                HgError::corrupted(
-                    "revlog references a revision not in the index",
-                )
-            })?;
-            if node == *index_entry.hash() {
+            let candidate_node = if rev == Revision(-1) {
+                NULL_NODE
+            } else {
+                let index_entry =
+                    self.index.get_entry(rev).ok_or_else(|| {
+                        HgError::corrupted(
+                            "revlog references a revision not in the index",
+                        )
+                    })?;
+                *index_entry.hash()
+            };
+            if node == candidate_node {
                return Ok(rev);
            }
-            if node.is_prefix_of(index_entry.hash()) {
+            if node.is_prefix_of(&candidate_node) {
                if found_by_prefix.is_some() {
                    return Err(RevlogError::AmbiguousPrefix);
                }
                found_by_prefix = Some(rev)
            }
        }
        found_by_prefix.ok_or(RevlogError::InvalidRevision)
    }
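
    // Note that the scan now starts at -1, so the null revision takes part
    // in prefix resolution like any other revision. For example, with a
    // single stored node "00d2a3..." (as in `test_nodemap` below):
    //
    //     prefix "00d" -> Ok(Revision(0))        only the stored node
    //     prefix "000" -> Ok(Revision(-1))       only NULL_NODE matches
    //     prefix "00"  -> Err(AmbiguousPrefix)   both of them match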

    /// Returns whether the given revision exists in this revlog.
    pub fn has_rev(&self, rev: UncheckedRevision) -> bool {
        self.index.check_revision(rev).is_some()
    }

    /// Return the full data associated to a revision.
    ///
    /// All entries required to build the final data out of deltas will be
    /// retrieved as needed, and the deltas will be applied to the initial
    /// snapshot to rebuild the final data.
    pub fn get_rev_data(
        &self,
        rev: UncheckedRevision,
    ) -> Result<Cow<[u8]>, RevlogError> {
        if rev == NULL_REVISION.into() {
            return Ok(Cow::Borrowed(&[]));
        };
        self.get_entry(rev)?.data()
    }

    /// [`Self::get_rev_data`] for checked revisions.
    pub fn get_rev_data_for_checked_rev(
        &self,
        rev: Revision,
    ) -> Result<Cow<[u8]>, RevlogError> {
        if rev == NULL_REVISION {
            return Ok(Cow::Borrowed(&[]));
        };
        self.get_entry_for_checked_rev(rev)?.data()
    }

    /// Check the hash of some given data against the recorded hash.
    pub fn check_hash(
        &self,
        p1: Revision,
        p2: Revision,
        expected: &[u8],
        data: &[u8],
    ) -> bool {
        let e1 = self.index.get_entry(p1);
        let h1 = match e1 {
            Some(ref entry) => entry.hash(),
            None => &NULL_NODE,
        };
        let e2 = self.index.get_entry(p2);
        let h2 = match e2 {
            Some(ref entry) => entry.hash(),
            None => &NULL_NODE,
        };

        hash(data, h1.as_bytes(), h2.as_bytes()) == expected
    }

    /// Build the full data of a revision out of its snapshot
    /// and its deltas.
    fn build_data_from_deltas(
        snapshot: RevlogEntry,
        deltas: &[RevlogEntry],
    ) -> Result<Vec<u8>, HgError> {
        let snapshot = snapshot.data_chunk()?;
        let deltas = deltas
            .iter()
            .rev()
            .map(RevlogEntry::data_chunk)
            .collect::<Result<Vec<_>, _>>()?;
        let patches: Vec<_> =
            deltas.iter().map(|d| patch::PatchList::new(d)).collect();
        let patch = patch::fold_patch_lists(&patches);
        Ok(patch.apply(&snapshot))
    }
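
    // Worked example of the resolution order (revision numbers are
    // illustrative): if rev 7 is a delta against rev 5, itself a delta
    // against the snapshot rev 2, the caller (`rawdata`) collects
    // `delta_chain = [entry7, entry5]` and passes `entry2` as `snapshot`.
    // The `.rev()` above then yields the chunks oldest-first, the patch
    // lists are folded into a single patch, and that patch is applied once
    // to the snapshot to produce rev 7's full data.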

    /// Return the revlog data.
    fn data(&self) -> &[u8] {
        match &self.data_bytes {
            Some(data_bytes) => data_bytes,
            None => panic!(
                "forgot to load the data or trying to access inline data"
            ),
        }
    }

    pub fn make_null_entry(&self) -> RevlogEntry {
        RevlogEntry {
            revlog: self,
            rev: NULL_REVISION,
            bytes: b"",
            compressed_len: 0,
            uncompressed_len: 0,
            base_rev_or_base_of_delta_chain: None,
            p1: NULL_REVISION,
            p2: NULL_REVISION,
            flags: NULL_REVLOG_ENTRY_FLAGS,
            hash: NULL_NODE,
        }
    }

    fn get_entry_for_checked_rev(
        &self,
        rev: Revision,
    ) -> Result<RevlogEntry, RevlogError> {
        if rev == NULL_REVISION {
            return Ok(self.make_null_entry());
        }
        let index_entry = self
            .index
            .get_entry(rev)
            .ok_or(RevlogError::InvalidRevision)?;
        let start = index_entry.offset();
        let end = start + index_entry.compressed_len() as usize;
        let data = if self.index.is_inline() {
            self.index.data(start, end)
        } else {
            &self.data()[start..end]
        };
        let base_rev = self
            .index
            .check_revision(index_entry.base_revision_or_base_of_delta_chain())
            .ok_or_else(|| {
                RevlogError::corrupted(format!(
                    "base revision for rev {} is invalid",
                    rev
                ))
            })?;
        let p1 =
            self.index.check_revision(index_entry.p1()).ok_or_else(|| {
                RevlogError::corrupted(format!(
                    "p1 for rev {} is invalid",
                    rev
                ))
            })?;
        let p2 =
            self.index.check_revision(index_entry.p2()).ok_or_else(|| {
                RevlogError::corrupted(format!(
                    "p2 for rev {} is invalid",
                    rev
                ))
            })?;
        let entry = RevlogEntry {
            revlog: self,
            rev,
            bytes: data,
            compressed_len: index_entry.compressed_len(),
            uncompressed_len: index_entry.uncompressed_len(),
            base_rev_or_base_of_delta_chain: if base_rev == rev {
                None
            } else {
                Some(base_rev)
            },
            p1,
            p2,
            flags: index_entry.flags(),
            hash: *index_entry.hash(),
        };
        Ok(entry)
    }

    /// Get an entry of the revlog.
    pub fn get_entry(
        &self,
        rev: UncheckedRevision,
    ) -> Result<RevlogEntry, RevlogError> {
        if rev == NULL_REVISION.into() {
            return Ok(self.make_null_entry());
        }
        let rev = self.index.check_revision(rev).ok_or_else(|| {
            RevlogError::corrupted(format!("rev {} is invalid", rev))
        })?;
        self.get_entry_for_checked_rev(rev)
    }
}

/// The revlog entry's bytes and the necessary information to extract
/// the entry's data.
#[derive(Clone)]
pub struct RevlogEntry<'revlog> {
    revlog: &'revlog Revlog,
    rev: Revision,
    bytes: &'revlog [u8],
    compressed_len: u32,
    uncompressed_len: i32,
    base_rev_or_base_of_delta_chain: Option<Revision>,
    p1: Revision,
    p2: Revision,
    flags: u16,
    hash: Node,
}

thread_local! {
    // seems fine to [unwrap] here: this can only fail due to memory allocation
    // failing, and it's normal for that to cause panic.
    static ZSTD_DECODER: RefCell<zstd::bulk::Decompressor<'static>> =
        RefCell::new(zstd::bulk::Decompressor::new().ok().unwrap());
}

fn zstd_decompress_to_buffer(
    bytes: &[u8],
    buf: &mut Vec<u8>,
) -> Result<usize, std::io::Error> {
    ZSTD_DECODER
        .with(|decoder| decoder.borrow_mut().decompress_to_buffer(bytes, buf))
}

impl<'revlog> RevlogEntry<'revlog> {
    pub fn revision(&self) -> Revision {
        self.rev
    }

    pub fn node(&self) -> &Node {
        &self.hash
    }

    pub fn uncompressed_len(&self) -> Option<u32> {
        u32::try_from(self.uncompressed_len).ok()
    }

    pub fn has_p1(&self) -> bool {
        self.p1 != NULL_REVISION
    }

    pub fn p1_entry(
        &self,
    ) -> Result<Option<RevlogEntry<'revlog>>, RevlogError> {
        if self.p1 == NULL_REVISION {
            Ok(None)
        } else {
            Ok(Some(self.revlog.get_entry_for_checked_rev(self.p1)?))
        }
    }

    pub fn p2_entry(
        &self,
    ) -> Result<Option<RevlogEntry<'revlog>>, RevlogError> {
        if self.p2 == NULL_REVISION {
            Ok(None)
        } else {
            Ok(Some(self.revlog.get_entry_for_checked_rev(self.p2)?))
        }
    }

    pub fn p1(&self) -> Option<Revision> {
        if self.p1 == NULL_REVISION {
            None
        } else {
            Some(self.p1)
        }
    }

    pub fn p2(&self) -> Option<Revision> {
        if self.p2 == NULL_REVISION {
            None
        } else {
            Some(self.p2)
        }
    }

    pub fn is_censored(&self) -> bool {
        (self.flags & REVISION_FLAG_CENSORED) != 0
    }

    pub fn has_length_affecting_flag_processor(&self) -> bool {
        // Relevant Python code: revlog.size()
        // note: ELLIPSIS is known to not change the content
        (self.flags & (REVIDX_KNOWN_FLAGS ^ REVISION_FLAG_ELLIPSIS)) != 0
    }

    /// The data for this entry, after resolving deltas if any.
    pub fn rawdata(&self) -> Result<Cow<'revlog, [u8]>, RevlogError> {
        let mut entry = self.clone();
        let mut delta_chain = vec![];

        // The meaning of `base_rev_or_base_of_delta_chain` depends on
        // generaldelta. See the doc on `ENTRY_DELTA_BASE` in
        // `mercurial/revlogutils/constants.py` and the code in
        // [_chaininfo] and in [index_deltachain].
        let uses_generaldelta = self.revlog.index.uses_generaldelta();
        while let Some(base_rev) = entry.base_rev_or_base_of_delta_chain {
            entry = if uses_generaldelta {
                delta_chain.push(entry);
                self.revlog.get_entry_for_checked_rev(base_rev)?
            } else {
                let base_rev = UncheckedRevision(entry.rev.0 - 1);
                delta_chain.push(entry);
                self.revlog.get_entry(base_rev)?
            };
        }

        let data = if delta_chain.is_empty() {
            entry.data_chunk()?
        } else {
            Revlog::build_data_from_deltas(entry, &delta_chain)?.into()
        };

        Ok(data)
    }

    fn check_data(
        &self,
        data: Cow<'revlog, [u8]>,
    ) -> Result<Cow<'revlog, [u8]>, RevlogError> {
        if self.revlog.check_hash(
            self.p1,
            self.p2,
            self.hash.as_bytes(),
            &data,
        ) {
            Ok(data)
        } else {
            if (self.flags & REVISION_FLAG_ELLIPSIS) != 0 {
                return Err(HgError::unsupported(
                    "ellipsis revisions are not supported by rhg",
                )
                .into());
            }
            Err(corrupted(format!(
                "hash check failed for revision {}",
                self.rev
            ))
            .into())
        }
    }

    pub fn data(&self) -> Result<Cow<'revlog, [u8]>, RevlogError> {
        let data = self.rawdata()?;
        if self.rev == NULL_REVISION {
            return Ok(data);
        }
        if self.is_censored() {
            return Err(HgError::CensoredNodeError.into());
        }
        self.check_data(data)
    }

    /// Extract the data contained in the entry.
    /// This may be a delta. (See `is_delta`.)
    fn data_chunk(&self) -> Result<Cow<'revlog, [u8]>, HgError> {
        if self.bytes.is_empty() {
            return Ok(Cow::Borrowed(&[]));
        }
        match self.bytes[0] {
            // Revision data is the entirety of the entry, including this
            // header.
            b'\0' => Ok(Cow::Borrowed(self.bytes)),
            // Raw revision data follows.
            b'u' => Ok(Cow::Borrowed(&self.bytes[1..])),
            // zlib (RFC 1950) data.
            b'x' => Ok(Cow::Owned(self.uncompressed_zlib_data()?)),
            // zstd data.
            b'\x28' => Ok(Cow::Owned(self.uncompressed_zstd_data()?)),
            // A proper new format should have had a repo/store requirement.
            format_type => Err(corrupted(format!(
                "unknown compression header '{}'",
                format_type
            ))),
        }
    }
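
    // For reference, the dispatch above follows the revlog chunk's first
    // byte:
    //
    //     b'\0' -> uncompressed; the header byte is part of the data
    //     b'u'  -> uncompressed; data starts after the header byte
    //     b'x'  -> zlib stream (RFC 1950; 'x' is the usual first byte)
    //     0x28  -> zstd frame (first byte of the zstd magic number)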

    fn uncompressed_zlib_data(&self) -> Result<Vec<u8>, HgError> {
        let mut decoder = ZlibDecoder::new(self.bytes);
        if self.is_delta() {
            let mut buf = Vec::with_capacity(self.compressed_len as usize);
            decoder
                .read_to_end(&mut buf)
                .map_err(|e| corrupted(e.to_string()))?;
            Ok(buf)
        } else {
            let cap = self.uncompressed_len.max(0) as usize;
            let mut buf = vec![0; cap];
            decoder
                .read_exact(&mut buf)
                .map_err(|e| corrupted(e.to_string()))?;
            Ok(buf)
        }
    }

    fn uncompressed_zstd_data(&self) -> Result<Vec<u8>, HgError> {
        let cap = self.uncompressed_len.max(0) as usize;
        if self.is_delta() {
            // [cap] is usually an over-estimate of the space needed because
            // it's the length of delta-decoded data, but we're interested
            // in the size of the delta.
            // This means we have to [shrink_to_fit] to avoid holding on
            // to a large chunk of memory, but it also means we must have a
            // fallback branch, for the case when the delta is longer than
            // the original data (surprisingly, this does happen in practice)
            let mut buf = Vec::with_capacity(cap);
            match zstd_decompress_to_buffer(self.bytes, &mut buf) {
                Ok(_) => buf.shrink_to_fit(),
                Err(_) => {
                    buf.clear();
                    zstd::stream::copy_decode(self.bytes, &mut buf)
                        .map_err(|e| corrupted(e.to_string()))?;
                }
            };
            Ok(buf)
        } else {
            let mut buf = Vec::with_capacity(cap);
            let len = zstd_decompress_to_buffer(self.bytes, &mut buf)
                .map_err(|e| corrupted(e.to_string()))?;
            if len != self.uncompressed_len as usize {
                Err(corrupted("uncompressed length does not match"))
            } else {
                Ok(buf)
            }
        }
    }
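
    // Minimal sketch of the capacity-then-shrink strategy above, using the
    // same `zstd` crate APIs (a standalone example, not revlog code):
    //
    //     let compressed = zstd::bulk::compress(b"a short delta", 3)?;
    //     let mut buf = Vec::with_capacity(4096); // deliberate over-estimate
    //     let mut dec = zstd::bulk::Decompressor::new()?;
    //     let n = dec.decompress_to_buffer(&compressed, &mut buf)?;
    //     buf.shrink_to_fit(); // give back the unused over-allocation
    //     assert_eq!(&buf[..n], b"a short delta");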

    /// Tell if the entry is a snapshot or a delta
    /// (this influences decompression).
    fn is_delta(&self) -> bool {
        self.base_rev_or_base_of_delta_chain.is_some()
    }
}

/// Calculate the hash of a revision given its data and its parents.
fn hash(
    data: &[u8],
    p1_hash: &[u8],
    p2_hash: &[u8],
) -> [u8; NODE_BYTES_LENGTH] {
    let mut hasher = Sha1::new();
    let (a, b) = (p1_hash, p2_hash);
    if a > b {
        hasher.update(b);
        hasher.update(a);
    } else {
        hasher.update(a);
        hasher.update(b);
    }
    hasher.update(data);
    *hasher.finalize().as_ref()
}
793 #[cfg(test)]
801 #[cfg(test)]
794 mod tests {
802 mod tests {
795 use super::*;
803 use super::*;
796 use crate::index::{IndexEntryBuilder, INDEX_ENTRY_SIZE};
804 use crate::index::{IndexEntryBuilder, INDEX_ENTRY_SIZE};
797 use itertools::Itertools;
805 use itertools::Itertools;
798
806
799 #[test]
807 #[test]
800 fn test_empty() {
808 fn test_empty() {
801 let temp = tempfile::tempdir().unwrap();
809 let temp = tempfile::tempdir().unwrap();
802 let vfs = Vfs { base: temp.path() };
810 let vfs = Vfs { base: temp.path() };
803 std::fs::write(temp.path().join("foo.i"), b"").unwrap();
811 std::fs::write(temp.path().join("foo.i"), b"").unwrap();
804 let revlog = Revlog::open(&vfs, "foo.i", None, false).unwrap();
812 let revlog = Revlog::open(&vfs, "foo.i", None, false).unwrap();
805 assert!(revlog.is_empty());
813 assert!(revlog.is_empty());
806 assert_eq!(revlog.len(), 0);
814 assert_eq!(revlog.len(), 0);
807 assert!(revlog.get_entry(0.into()).is_err());
815 assert!(revlog.get_entry(0.into()).is_err());
808 assert!(!revlog.has_rev(0.into()));
816 assert!(!revlog.has_rev(0.into()));
809 assert_eq!(
817 assert_eq!(
810 revlog.rev_from_node(NULL_NODE.into()).unwrap(),
818 revlog.rev_from_node(NULL_NODE.into()).unwrap(),
811 NULL_REVISION
819 NULL_REVISION
812 );
820 );
813 let null_entry = revlog.get_entry(NULL_REVISION.into()).ok().unwrap();
821 let null_entry = revlog.get_entry(NULL_REVISION.into()).ok().unwrap();
814 assert_eq!(null_entry.revision(), NULL_REVISION);
822 assert_eq!(null_entry.revision(), NULL_REVISION);
815 assert!(null_entry.data().unwrap().is_empty());
823 assert!(null_entry.data().unwrap().is_empty());
816 }
824 }
817
825
818 #[test]
826 #[test]
819 fn test_inline() {
827 fn test_inline() {
820 let temp = tempfile::tempdir().unwrap();
828 let temp = tempfile::tempdir().unwrap();
821 let vfs = Vfs { base: temp.path() };
829 let vfs = Vfs { base: temp.path() };
822 let node0 = Node::from_hex("2ed2a3912a0b24502043eae84ee4b279c18b90dd")
830 let node0 = Node::from_hex("2ed2a3912a0b24502043eae84ee4b279c18b90dd")
823 .unwrap();
831 .unwrap();
824 let node1 = Node::from_hex("b004912a8510032a0350a74daa2803dadfb00e12")
832 let node1 = Node::from_hex("b004912a8510032a0350a74daa2803dadfb00e12")
825 .unwrap();
833 .unwrap();
826 let node2 = Node::from_hex("dd6ad206e907be60927b5a3117b97dffb2590582")
834 let node2 = Node::from_hex("dd6ad206e907be60927b5a3117b97dffb2590582")
827 .unwrap();
835 .unwrap();
828 let entry0_bytes = IndexEntryBuilder::new()
836 let entry0_bytes = IndexEntryBuilder::new()
829 .is_first(true)
837 .is_first(true)
830 .with_version(1)
838 .with_version(1)
831 .with_inline(true)
839 .with_inline(true)
832 .with_offset(INDEX_ENTRY_SIZE)
840 .with_offset(INDEX_ENTRY_SIZE)
833 .with_node(node0)
841 .with_node(node0)
834 .build();
842 .build();
835 let entry1_bytes = IndexEntryBuilder::new()
843 let entry1_bytes = IndexEntryBuilder::new()
836 .with_offset(INDEX_ENTRY_SIZE)
844 .with_offset(INDEX_ENTRY_SIZE)
837 .with_node(node1)
845 .with_node(node1)
838 .build();
846 .build();
839 let entry2_bytes = IndexEntryBuilder::new()
847 let entry2_bytes = IndexEntryBuilder::new()
840 .with_offset(INDEX_ENTRY_SIZE)
848 .with_offset(INDEX_ENTRY_SIZE)
841 .with_p1(Revision(0))
849 .with_p1(Revision(0))
842 .with_p2(Revision(1))
850 .with_p2(Revision(1))
843 .with_node(node2)
851 .with_node(node2)
844 .build();
852 .build();
845 let contents = vec![entry0_bytes, entry1_bytes, entry2_bytes]
853 let contents = vec![entry0_bytes, entry1_bytes, entry2_bytes]
846 .into_iter()
854 .into_iter()
847 .flatten()
855 .flatten()
848 .collect_vec();
856 .collect_vec();
849 std::fs::write(temp.path().join("foo.i"), contents).unwrap();
857 std::fs::write(temp.path().join("foo.i"), contents).unwrap();
850 let revlog = Revlog::open(&vfs, "foo.i", None, false).unwrap();
858 let revlog = Revlog::open(&vfs, "foo.i", None, false).unwrap();
851
859
852 let entry0 = revlog.get_entry(0.into()).ok().unwrap();
860 let entry0 = revlog.get_entry(0.into()).ok().unwrap();
853 assert_eq!(entry0.revision(), Revision(0));
861 assert_eq!(entry0.revision(), Revision(0));
854 assert_eq!(*entry0.node(), node0);
862 assert_eq!(*entry0.node(), node0);
855 assert!(!entry0.has_p1());
863 assert!(!entry0.has_p1());
856 assert_eq!(entry0.p1(), None);
864 assert_eq!(entry0.p1(), None);
857 assert_eq!(entry0.p2(), None);
865 assert_eq!(entry0.p2(), None);
858 let p1_entry = entry0.p1_entry().unwrap();
866 let p1_entry = entry0.p1_entry().unwrap();
859 assert!(p1_entry.is_none());
867 assert!(p1_entry.is_none());
860 let p2_entry = entry0.p2_entry().unwrap();
868 let p2_entry = entry0.p2_entry().unwrap();
861 assert!(p2_entry.is_none());
869 assert!(p2_entry.is_none());
862
870
863 let entry1 = revlog.get_entry(1.into()).ok().unwrap();
871 let entry1 = revlog.get_entry(1.into()).ok().unwrap();
864 assert_eq!(entry1.revision(), Revision(1));
872 assert_eq!(entry1.revision(), Revision(1));
865 assert_eq!(*entry1.node(), node1);
873 assert_eq!(*entry1.node(), node1);
866 assert!(!entry1.has_p1());
874 assert!(!entry1.has_p1());
867 assert_eq!(entry1.p1(), None);
875 assert_eq!(entry1.p1(), None);
868 assert_eq!(entry1.p2(), None);
876 assert_eq!(entry1.p2(), None);
869 let p1_entry = entry1.p1_entry().unwrap();
877 let p1_entry = entry1.p1_entry().unwrap();
870 assert!(p1_entry.is_none());
878 assert!(p1_entry.is_none());
871 let p2_entry = entry1.p2_entry().unwrap();
879 let p2_entry = entry1.p2_entry().unwrap();
872 assert!(p2_entry.is_none());
880 assert!(p2_entry.is_none());
873
881
874 let entry2 = revlog.get_entry(2.into()).ok().unwrap();
882 let entry2 = revlog.get_entry(2.into()).ok().unwrap();
875 assert_eq!(entry2.revision(), Revision(2));
883 assert_eq!(entry2.revision(), Revision(2));
876 assert_eq!(*entry2.node(), node2);
884 assert_eq!(*entry2.node(), node2);
877 assert!(entry2.has_p1());
885 assert!(entry2.has_p1());
878 assert_eq!(entry2.p1(), Some(Revision(0)));
886 assert_eq!(entry2.p1(), Some(Revision(0)));
879 assert_eq!(entry2.p2(), Some(Revision(1)));
887 assert_eq!(entry2.p2(), Some(Revision(1)));
880 let p1_entry = entry2.p1_entry().unwrap();
888 let p1_entry = entry2.p1_entry().unwrap();
881 assert!(p1_entry.is_some());
889 assert!(p1_entry.is_some());
882 assert_eq!(p1_entry.unwrap().revision(), Revision(0));
890 assert_eq!(p1_entry.unwrap().revision(), Revision(0));
883 let p2_entry = entry2.p2_entry().unwrap();
891 let p2_entry = entry2.p2_entry().unwrap();
884 assert!(p2_entry.is_some());
892 assert!(p2_entry.is_some());
885 assert_eq!(p2_entry.unwrap().revision(), Revision(1));
893 assert_eq!(p2_entry.unwrap().revision(), Revision(1));
886 }
894 }
887
895
888 #[test]
896 #[test]
889 fn test_nodemap() {
897 fn test_nodemap() {
890 let temp = tempfile::tempdir().unwrap();
898 let temp = tempfile::tempdir().unwrap();
891 let vfs = Vfs { base: temp.path() };
899 let vfs = Vfs { base: temp.path() };
892
900
893 // building a revlog with a forced Node starting with zeros
901 // building a revlog with a forced Node starting with zeros
894 // This is a corruption, but it does not preclude using the nodemap
902 // This is a corruption, but it does not preclude using the nodemap
895 // if we don't try and access the data
903 // if we don't try and access the data
896 let node0 = Node::from_hex("00d2a3912a0b24502043eae84ee4b279c18b90dd")
904 let node0 = Node::from_hex("00d2a3912a0b24502043eae84ee4b279c18b90dd")
897 .unwrap();
905 .unwrap();
898 let node1 = Node::from_hex("b004912a8510032a0350a74daa2803dadfb00e12")
906 let node1 = Node::from_hex("b004912a8510032a0350a74daa2803dadfb00e12")
899 .unwrap();
907 .unwrap();
900 let entry0_bytes = IndexEntryBuilder::new()
908 let entry0_bytes = IndexEntryBuilder::new()
901 .is_first(true)
909 .is_first(true)
902 .with_version(1)
910 .with_version(1)
903 .with_inline(true)
911 .with_inline(true)
904 .with_offset(INDEX_ENTRY_SIZE)
912 .with_offset(INDEX_ENTRY_SIZE)
905 .with_node(node0)
913 .with_node(node0)
906 .build();
914 .build();
907 let entry1_bytes = IndexEntryBuilder::new()
915 let entry1_bytes = IndexEntryBuilder::new()
908 .with_offset(INDEX_ENTRY_SIZE)
916 .with_offset(INDEX_ENTRY_SIZE)
909 .with_node(node1)
917 .with_node(node1)
910 .build();
918 .build();
911 let contents = vec![entry0_bytes, entry1_bytes]
919 let contents = vec![entry0_bytes, entry1_bytes]
912 .into_iter()
920 .into_iter()
913 .flatten()
921 .flatten()
914 .collect_vec();
922 .collect_vec();
915 std::fs::write(temp.path().join("foo.i"), contents).unwrap();
923 std::fs::write(temp.path().join("foo.i"), contents).unwrap();
916 let revlog = Revlog::open(&vfs, "foo.i", None, false).unwrap();
924
925 let mut idx = nodemap::tests::TestNtIndex::new();
926 idx.insert_node(Revision(0), node0).unwrap();
927 idx.insert_node(Revision(1), node1).unwrap();
928
929 let revlog =
930 Revlog::open_gen(&vfs, "foo.i", None, true, Some(idx.nt)).unwrap();
917
931
918 // accessing the data shows the corruption
932 // accessing the data shows the corruption
919 revlog.get_entry(0.into()).unwrap().data().unwrap_err();
933 revlog.get_entry(0.into()).unwrap().data().unwrap_err();
920
934
921 assert_eq!(
935 assert_eq!(
922 revlog.rev_from_node(NULL_NODE.into()).unwrap(),
936 revlog.rev_from_node(NULL_NODE.into()).unwrap(),
923 Revision(-1)
937 Revision(-1)
924 );
938 );
925 assert_eq!(revlog.rev_from_node(node0.into()).unwrap(), Revision(0));
939 assert_eq!(revlog.rev_from_node(node0.into()).unwrap(), Revision(0));
926 assert_eq!(revlog.rev_from_node(node1.into()).unwrap(), Revision(1));
940 assert_eq!(revlog.rev_from_node(node1.into()).unwrap(), Revision(1));
927 assert_eq!(
941 assert_eq!(
928 revlog
942 revlog
929 .rev_from_node(NodePrefix::from_hex("000").unwrap())
943 .rev_from_node(NodePrefix::from_hex("000").unwrap())
930 .unwrap(),
944 .unwrap(),
931 Revision(-1)
945 Revision(-1)
932 );
946 );
933 assert_eq!(
947 assert_eq!(
934 revlog
948 revlog
935 .rev_from_node(NodePrefix::from_hex("b00").unwrap())
949 .rev_from_node(NodePrefix::from_hex("b00").unwrap())
936 .unwrap(),
950 .unwrap(),
937 Revision(1)
951 Revision(1)
938 );
952 );
939 // RevlogError does not implement PartialEq
953 // RevlogError does not implement PartialEq
940 // (ultimately because io::Error does not)
954 // (ultimately because io::Error does not)
941 match revlog
955 match revlog
942 .rev_from_node(NodePrefix::from_hex("00").unwrap())
956 .rev_from_node(NodePrefix::from_hex("00").unwrap())
943 .expect_err("Expected to give AmbiguousPrefix error")
957 .expect_err("Expected to give AmbiguousPrefix error")
944 {
958 {
945 RevlogError::AmbiguousPrefix => (),
959 RevlogError::AmbiguousPrefix => (),
946 e => {
960 e => {
947 panic!("Got another error than AmbiguousPrefix: {:?}", e);
961 panic!("Got another error than AmbiguousPrefix: {:?}", e);
948 }
962 }
949 };
963 };
950 }
964 }
951 }
965 }
@@ -1,1095 +1,1102 b''
// Copyright 2018-2020 Georges Racinet <georges.racinet@octobus.net>
// and Mercurial contributors
//
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
//! Indexing facilities for fast retrieval of `Revision` from `Node`
//!
//! This provides a variation on the 16-ary radix tree that is
//! provided as "nodetree" in revlog.c, ready for append-only persistence
//! on disk.
//!
//! Following existing implicit conventions, the "nodemap" terminology
//! is used in a more abstract context.

use crate::UncheckedRevision;

use super::{
    node::NULL_NODE, Node, NodePrefix, Revision, RevlogIndex, NULL_REVISION,
};

use bytes_cast::{unaligned, BytesCast};
use std::cmp::max;
use std::fmt;
use std::mem::{self, align_of, size_of};
use std::ops::Deref;
use std::ops::Index;

#[derive(Debug, PartialEq)]
pub enum NodeMapError {
    /// A `NodePrefix` matches several [`Revision`]s.
    ///
    /// This can be returned by methods meant for (at most) one match.
    MultipleResults,
    /// A `Revision` stored in the nodemap could not be found in the index
    RevisionNotInIndex(UncheckedRevision),
}

/// Mapping system from Mercurial nodes to revision numbers.
///
/// ## `RevlogIndex` and `NodeMap`
///
/// One way to think about their relationship is that
/// the `NodeMap` is a prefix-oriented reverse index of the [`Node`]
/// information carried by a [`RevlogIndex`].
///
/// Many of the methods in this trait take a `RevlogIndex` argument
/// which is used for validation of their results. This index must naturally
/// be the one the `NodeMap` is about, and it must be consistent.
///
/// Notably, the `NodeMap` must not store
/// information about more `Revision` values than there are in the index.
/// In these methods, if an encountered `Revision` is not in the index, a
/// [RevisionNotInIndex](NodeMapError) error is returned.
///
/// In insert operations, the rule is thus that the `NodeMap` must always
/// be updated after the `RevlogIndex` it is about.
pub trait NodeMap {
    /// Find the unique `Revision` having the given `Node`
    ///
    /// If no Revision matches the given `Node`, `Ok(None)` is returned.
    fn find_node(
        &self,
        index: &impl RevlogIndex,
        node: &Node,
    ) -> Result<Option<Revision>, NodeMapError> {
        self.find_bin(index, node.into())
    }

    /// Find the unique Revision whose `Node` starts with a given binary
    /// prefix
    ///
    /// If no Revision matches the given prefix, `Ok(None)` is returned.
    ///
    /// If several Revisions match the given prefix, a
    /// [MultipleResults](NodeMapError) error is returned.
    fn find_bin(
        &self,
        idx: &impl RevlogIndex,
        prefix: NodePrefix,
    ) -> Result<Option<Revision>, NodeMapError>;

    /// Give the size of the shortest node prefix that determines
    /// the revision uniquely.
    ///
    /// From a binary node prefix, if it is matched in the node map, this
    /// returns the number of hexadecimal digits that would have sufficed
    /// to find the revision uniquely.
    ///
    /// Returns `None` if no [`Revision`] could be found for the prefix.
    ///
    /// If several Revisions match the given prefix, a
    /// [MultipleResults](NodeMapError) error is returned.
    fn unique_prefix_len_bin(
        &self,
        idx: &impl RevlogIndex,
        node_prefix: NodePrefix,
    ) -> Result<Option<usize>, NodeMapError>;

    /// Same as [unique_prefix_len_bin](Self::unique_prefix_len_bin), with
    /// a full [`Node`] as input
    fn unique_prefix_len_node(
        &self,
        idx: &impl RevlogIndex,
        node: &Node,
    ) -> Result<Option<usize>, NodeMapError> {
        self.unique_prefix_len_bin(idx, node.into())
    }
}

pub trait MutableNodeMap: NodeMap {
    fn insert<I: RevlogIndex>(
        &mut self,
        index: &I,
        node: &Node,
        rev: Revision,
    ) -> Result<(), NodeMapError>;
}

/// Low level NodeTree [`Block`] elements
///
/// These are stored exactly in this form, for instance on persistent
/// storage.
type RawElement = unaligned::I32Be;

/// High level representation of values in NodeTree
/// [`Blocks`](struct.Block.html)
///
/// This is the high level representation that most algorithms should
/// use.
#[derive(Clone, Debug, Eq, PartialEq)]
enum Element {
    // This is not a Mercurial revision. It's an `i32` because this is the
    // right type for this structure.
    Rev(i32),
    Block(usize),
    None,
}

impl From<RawElement> for Element {
    /// Conversion from low level representation, after endianness conversion.
    ///
    /// See [`Block`](struct.Block.html) for explanation about the encoding.
    fn from(raw: RawElement) -> Element {
        let int = raw.get();
        if int >= 0 {
            Element::Block(int as usize)
        } else if int == -1 {
            Element::None
        } else {
            Element::Rev(-int - 2)
        }
    }
}

impl From<Element> for RawElement {
    fn from(element: Element) -> RawElement {
        RawElement::from(match element {
            Element::None => 0,
            Element::Block(i) => i as i32,
            Element::Rev(rev) => -rev - 2,
        })
    }
}
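
// Worked examples of the reading conversion above (`From<RawElement>`):
//
//     raw  3 -> Element::Block(3)   non-negative values are block indexes
//     raw  0 -> Element::Block(0)   hence 0 cannot be the "absent" marker
//     raw -1 -> Element::None       the empty slot marker
//     raw -2 -> Element::Rev(0)     revisions start at -2 and go down
//     raw -7 -> Element::Rev(5)     i.e. rev = -raw - 2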

const ELEMENTS_PER_BLOCK: usize = 16; // number of different values in a nybble

/// A logical block of the [`NodeTree`], packed with a fixed size.
///
/// These are always used in container types implementing `Index<Block>`,
/// such as `&Block`
///
/// As an array of integers, its ith element encodes that the
/// ith potential edge from the block, representing the ith hexadecimal digit
/// (nybble) `i` is either:
///
/// - absent (value -1)
/// - another `Block` in the same indexable container (value ≥ 0)
/// - a [`Revision`] leaf (value ≤ -2)
///
/// Endianness has to be fixed for consistency on shared storage across
/// different architectures.
///
/// A key difference with the C `nodetree` is that we need to be
/// able to represent the [`Block`] at index 0, hence -1 is the empty marker
/// rather than 0 and the [`Revision`] range upper limit of -2 instead of -1.
///
/// Another related difference is that `NULL_REVISION` (-1) is not
/// represented at all, because we want an immutable empty nodetree
/// to be valid.
#[derive(Copy, Clone, BytesCast, PartialEq)]
#[repr(transparent)]
pub struct Block([RawElement; ELEMENTS_PER_BLOCK]);

impl Block {
    fn new() -> Self {
        let absent_node = RawElement::from(-1);
        Block([absent_node; ELEMENTS_PER_BLOCK])
    }

    fn get(&self, nybble: u8) -> Element {
        self.0[nybble as usize].into()
    }

    fn set(&mut self, nybble: u8, element: Element) {
        self.0[nybble as usize] = element.into()
    }
}
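
// Sketch of how a lookup walks blocks nybble by nybble (names such as
// `blocks`, `start` and `next_nybble` are illustrative, not the actual
// walk code):
//
//     let mut element = blocks[start].get(first_nybble);
//     loop {
//         match element {
//             Element::Block(i) => element = blocks[i].get(next_nybble()),
//             Element::Rev(r) => break Some(r),  // candidate revision leaf
//             Element::None => break None,       // no node with this prefix
//         }
//     }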

impl fmt::Debug for Block {
    /// sparse representation for testing and debugging purposes
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_map()
            .entries((0..16).filter_map(|i| match self.get(i) {
                Element::None => None,
                element => Some((i, element)),
            }))
            .finish()
    }
}

/// A mutable 16-radix tree with the root block logically at the end
///
/// Because of the append only nature of our node trees, we need to
/// keep the original untouched and store new blocks separately.
///
/// The mutable root [`Block`] is kept apart so that we don't have to rebump
/// it on each insertion.
pub struct NodeTree {
    readonly: Box<dyn Deref<Target = [Block]> + Send>,
    growable: Vec<Block>,
    root: Block,
    masked_inner_blocks: usize,
}

impl Index<usize> for NodeTree {
    type Output = Block;

    fn index(&self, i: usize) -> &Block {
        let ro_len = self.readonly.len();
        if i < ro_len {
            &self.readonly[i]
        } else if i == ro_len + self.growable.len() {
            &self.root
        } else {
            &self.growable[i - ro_len]
        }
    }
}
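
// Worked example of the addressing scheme above: with 10 readonly blocks
// and 4 growable ones, indexes 0..=9 hit `readonly`, indexes 10..=13 hit
// `growable` (offset by `ro_len`), and index 14 is the mutable `root`.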
247
247
248 /// Return `None` unless the [`Node`] for `rev` has given prefix in `idx`.
248 /// Return `None` unless the [`Node`] for `rev` has given prefix in `idx`.
249 fn has_prefix_or_none(
249 fn has_prefix_or_none(
250 idx: &impl RevlogIndex,
250 idx: &impl RevlogIndex,
251 prefix: NodePrefix,
251 prefix: NodePrefix,
252 rev: UncheckedRevision,
252 rev: UncheckedRevision,
253 ) -> Result<Option<Revision>, NodeMapError> {
253 ) -> Result<Option<Revision>, NodeMapError> {
254 match idx.check_revision(rev) {
254 match idx.check_revision(rev) {
255 Some(checked) => idx
255 Some(checked) => idx
256 .node(checked)
256 .node(checked)
257 .ok_or(NodeMapError::RevisionNotInIndex(rev))
257 .ok_or(NodeMapError::RevisionNotInIndex(rev))
258 .map(|node| {
258 .map(|node| {
259 if prefix.is_prefix_of(node) {
259 if prefix.is_prefix_of(node) {
260 Some(checked)
260 Some(checked)
261 } else {
261 } else {
262 None
262 None
263 }
263 }
264 }),
264 }),
265 None => Err(NodeMapError::RevisionNotInIndex(rev)),
265 None => Err(NodeMapError::RevisionNotInIndex(rev)),
266 }
266 }
267 }
267 }
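// Editor's note on the contract above: `Err` means `rev` is not in the
// index at all, `Ok(None)` means the revision exists but its node does not
// start with `prefix`, and `Ok(Some(checked))` confirms the prefix match.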
268
268
269 /// Validate that the candidate's node does start with the given prefix,
269 /// Validate that the candidate's node does start with the given prefix,
270 /// and treat ambiguities related to [`NULL_REVISION`].
270 /// and treat ambiguities related to [`NULL_REVISION`].
271 ///
271 ///
272 /// From the data in the NodeTree, one can only conclude that some
272 /// From the data in the NodeTree, one can only conclude that some
273 /// revision is the only one for a *subprefix* of the one being looked up.
273 /// revision is the only one for a *subprefix* of the one being looked up.
274 fn validate_candidate(
274 fn validate_candidate(
275 idx: &impl RevlogIndex,
275 idx: &impl RevlogIndex,
276 prefix: NodePrefix,
276 prefix: NodePrefix,
277 candidate: (Option<UncheckedRevision>, usize),
277 candidate: (Option<UncheckedRevision>, usize),
278 ) -> Result<(Option<Revision>, usize), NodeMapError> {
278 ) -> Result<(Option<Revision>, usize), NodeMapError> {
279 let (rev, steps) = candidate;
279 let (rev, steps) = candidate;
280 if let Some(nz_nybble) = prefix.first_different_nybble(&NULL_NODE) {
280 if let Some(nz_nybble) = prefix.first_different_nybble(&NULL_NODE) {
281 rev.map_or(Ok((None, steps)), |r| {
281 rev.map_or(Ok((None, steps)), |r| {
282 has_prefix_or_none(idx, prefix, r)
282 has_prefix_or_none(idx, prefix, r)
283 .map(|opt| (opt, max(steps, nz_nybble + 1)))
283 .map(|opt| (opt, max(steps, nz_nybble + 1)))
284 })
284 })
285 } else {
285 } else {
286 // the prefix is only made of zeros; NULL_REVISION always matches it
286 // the prefix is only made of zeros; NULL_REVISION always matches it
287 // and any other *valid* result is an ambiguity
287 // and any other *valid* result is an ambiguity
288 match rev {
288 match rev {
289 None => Ok((Some(NULL_REVISION), steps + 1)),
289 None => Ok((Some(NULL_REVISION), steps + 1)),
290 Some(r) => match has_prefix_or_none(idx, prefix, r)? {
290 Some(r) => match has_prefix_or_none(idx, prefix, r)? {
291 None => Ok((Some(NULL_REVISION), steps + 1)),
291 None => Ok((Some(NULL_REVISION), steps + 1)),
292 _ => Err(NodeMapError::MultipleResults),
292 _ => Err(NodeMapError::MultipleResults),
293 },
293 },
294 }
294 }
295 }
295 }
296 }
296 }
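// Editor's sketch of the all-zeros rule above, with a plain bool standing in
// for the index check: an all-zero prefix always matches NULL_REVISION, so a
// candidate that *also* genuinely matches makes the lookup ambiguous.
fn resolve_zero_prefix(
    candidate_matches: Option<bool>,
) -> Result<&'static str, &'static str> {
    match candidate_matches {
        None => Ok("NULL_REVISION"),        // nothing else in the tree
        Some(false) => Ok("NULL_REVISION"), // candidate ruled out by the index
        Some(true) => Err("MultipleResults"),
    }
}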
297
297
298 impl NodeTree {
298 impl NodeTree {
299 /// Initialize a NodeTree from an immutable slice-like of `Block`
299 /// Initialize a NodeTree from an immutable slice-like of `Block`
300 ///
300 ///
301 /// We keep `readonly` and clone its root block if it isn't empty.
301 /// We keep `readonly` and clone its root block if it isn't empty.
302 fn new(readonly: Box<dyn Deref<Target = [Block]> + Send>) -> Self {
302 fn new(readonly: Box<dyn Deref<Target = [Block]> + Send>) -> Self {
303 let root = readonly.last().cloned().unwrap_or_else(Block::new);
303 let root = readonly.last().cloned().unwrap_or_else(Block::new);
304 NodeTree {
304 NodeTree {
305 readonly,
305 readonly,
306 growable: Vec::new(),
306 growable: Vec::new(),
307 root,
307 root,
308 masked_inner_blocks: 0,
308 masked_inner_blocks: 0,
309 }
309 }
310 }
310 }
311
311
312 /// Create from an opaque bunch of bytes
312 /// Create from an opaque bunch of bytes
313 ///
313 ///
314 /// Creates a [`NodeTreeBytes`] from `bytes`,
314 /// Creates a [`NodeTreeBytes`] from `bytes`,
315 /// of which exactly `amount` bytes are used.
315 /// of which exactly `amount` bytes are used.
316 ///
316 ///
317 /// - `bytes` could be derived from `PyBuffer` and `Mmap` objects.
317 /// - `bytes` could be derived from `PyBuffer` and `Mmap` objects.
318 /// - `amount` is expressed in bytes, and is not automatically derived from
318 /// - `amount` is expressed in bytes, and is not automatically derived from
319 /// `bytes`, so that a caller that manages them atomically can perform
319 /// `bytes`, so that a caller that manages them atomically can perform
320 /// temporary disk serializations and still rollback easily if needed.
320 /// temporary disk serializations and still rollback easily if needed.
321 /// First use-case for this would be to support Mercurial shell hooks.
321 /// First use-case for this would be to support Mercurial shell hooks.
322 ///
322 ///
323 /// Panics if `bytes` is smaller than `amount`.
323 /// Panics if `bytes` is smaller than `amount`.
324 pub fn load_bytes(
324 pub fn load_bytes(
325 bytes: Box<dyn Deref<Target = [u8]> + Send>,
325 bytes: Box<dyn Deref<Target = [u8]> + Send>,
326 amount: usize,
326 amount: usize,
327 ) -> Self {
327 ) -> Self {
328 NodeTree::new(Box::new(NodeTreeBytes::new(bytes, amount)))
328 NodeTree::new(Box::new(NodeTreeBytes::new(bytes, amount)))
329 }
329 }
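    // Editor's sketch of intended use; the file name and the `Vec` backing
    // are illustrative assumptions, not taken from this changeset:
    //
    //     let data: Vec<u8> = std::fs::read("nodemap-data")?;
    //     let used = data.len() - data.len() % size_of::<Block>();
    //     let nt = NodeTree::load_bytes(Box::new(data), used);
    //
    // `Vec<u8>` satisfies `Deref<Target = [u8]> + Send`, as would an mmap.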
330
330
331 /// Retrieve added [`Block`]s and the original immutable data
331 /// Retrieve added [`Block`]s and the original immutable data
332 pub fn into_readonly_and_added(
332 pub fn into_readonly_and_added(
333 self,
333 self,
334 ) -> (Box<dyn Deref<Target = [Block]> + Send>, Vec<Block>) {
334 ) -> (Box<dyn Deref<Target = [Block]> + Send>, Vec<Block>) {
335 let mut vec = self.growable;
335 let mut vec = self.growable;
336 let readonly = self.readonly;
336 let readonly = self.readonly;
337 if readonly.last() != Some(&self.root) {
337 if readonly.last() != Some(&self.root) {
338 vec.push(self.root);
338 vec.push(self.root);
339 }
339 }
340 (readonly, vec)
340 (readonly, vec)
341 }
341 }
342
342
343 /// Retrieve added [`Block`]s as bytes, ready to be written to persistent
343 /// Retrieve added [`Block`]s as bytes, ready to be written to persistent
344 /// storage
344 /// storage
345 pub fn into_readonly_and_added_bytes(
345 pub fn into_readonly_and_added_bytes(
346 self,
346 self,
347 ) -> (Box<dyn Deref<Target = [Block]> + Send>, Vec<u8>) {
347 ) -> (Box<dyn Deref<Target = [Block]> + Send>, Vec<u8>) {
348 let (readonly, vec) = self.into_readonly_and_added();
348 let (readonly, vec) = self.into_readonly_and_added();
349 // Prevent running `vec`'s destructor so we are in complete control
349 // Prevent running `vec`'s destructor so we are in complete control
350 // of the allocation.
350 // of the allocation.
351 let vec = mem::ManuallyDrop::new(vec);
351 let vec = mem::ManuallyDrop::new(vec);
352
352
353 // Transmute the `Vec<Block>` to a `Vec<u8>`. Blocks are contiguous
353 // Transmute the `Vec<Block>` to a `Vec<u8>`. Blocks are contiguous
354 // bytes, so this is perfectly safe.
354 // bytes, so this is perfectly safe.
355 let bytes = unsafe {
355 let bytes = unsafe {
356 // Check for compatible allocation layout.
356 // Check for compatible allocation layout.
357 // (Optimized away by constant-folding + dead code elimination.)
357 // (Optimized away by constant-folding + dead code elimination.)
358 assert_eq!(size_of::<Block>(), 64);
358 assert_eq!(size_of::<Block>(), 64);
359 assert_eq!(align_of::<Block>(), 1);
359 assert_eq!(align_of::<Block>(), 1);
360
360
361 // /!\ Any use of `vec` after this is use-after-free.
361 // /!\ Any use of `vec` after this is use-after-free.
362 // TODO: use `into_raw_parts` once stabilized
362 // TODO: use `into_raw_parts` once stabilized
363 Vec::from_raw_parts(
363 Vec::from_raw_parts(
364 vec.as_ptr() as *mut u8,
364 vec.as_ptr() as *mut u8,
365 vec.len() * size_of::<Block>(),
365 vec.len() * size_of::<Block>(),
366 vec.capacity() * size_of::<Block>(),
366 vec.capacity() * size_of::<Block>(),
367 )
367 )
368 };
368 };
369 (readonly, bytes)
369 (readonly, bytes)
370 }
370 }
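    // Editor's sketch (an assumption, not from this changeset): a caller
    // persisting the nodemap would append `bytes` after the data backing
    // `readonly`, then reload everything with `load_bytes`:
    //
    //     let (_readonly, bytes) = nt.into_readonly_and_added_bytes();
    //     assert_eq!(bytes.len() % size_of::<Block>(), 0); // whole blocks
    //     out.write_all(&bytes)?;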
371
371
372 /// Total number of blocks
372 /// Total number of blocks
373 fn len(&self) -> usize {
373 fn len(&self) -> usize {
374 self.readonly.len() + self.growable.len() + 1
374 self.readonly.len() + self.growable.len() + 1
375 }
375 }
376
376
377 /// Implemented for completeness
377 /// Implemented for completeness
378 ///
378 ///
379 /// A `NodeTree` always has at least the mutable root block.
379 /// A `NodeTree` always has at least the mutable root block.
380 #[allow(dead_code)]
380 #[allow(dead_code)]
381 fn is_empty(&self) -> bool {
381 fn is_empty(&self) -> bool {
382 false
382 false
383 }
383 }
384
384
385 /// Main working method for `NodeTree` searches
385 /// Main working method for `NodeTree` searches
386 ///
386 ///
387 /// The first returned value is the result of analysing `NodeTree` data
387 /// The first returned value is the result of analysing `NodeTree` data
388 /// *alone*: `None` guarantees that the given prefix is absent
388 /// *alone*: `None` guarantees that the given prefix is absent
389 /// from the [`NodeTree`] data (but it could still match [`NULL_NODE`]),
389 /// from the [`NodeTree`] data (but it could still match [`NULL_NODE`]),
390 /// while `Some(rev)` means that `rev` is the unique
390 /// while `Some(rev)` means that `rev` is the unique
391 /// [`Revision`] that could match the prefix. Strictly, all that can
391 /// [`Revision`] that could match the prefix. Strictly, all that can
392 /// be inferred from
392 /// be inferred from
393 /// the `NodeTree` data is that `rev` is the revision with the longest
393 /// the `NodeTree` data is that `rev` is the revision with the longest
394 /// common node prefix with the given prefix.
394 /// common node prefix with the given prefix.
395 /// We return an [`UncheckedRevision`] because we have no guarantee that
395 /// We return an [`UncheckedRevision`] because we have no guarantee that
396 /// the revision we found is valid for the index.
396 /// the revision we found is valid for the index.
397 ///
397 ///
398 /// The second returned value is the size of the smallest subprefix
398 /// The second returned value is the size of the smallest subprefix
399 /// of `prefix` that would give the same result, i.e. not the
399 /// of `prefix` that would give the same result, i.e. not the
400 /// [MultipleResults](NodeMapError) error variant (again, using only the
400 /// [MultipleResults](NodeMapError) error variant (again, using only the
401 /// data of the [`NodeTree`]).
401 /// data of the [`NodeTree`]).
402 fn lookup(
402 fn lookup(
403 &self,
403 &self,
404 prefix: NodePrefix,
404 prefix: NodePrefix,
405 ) -> Result<(Option<UncheckedRevision>, usize), NodeMapError> {
405 ) -> Result<(Option<UncheckedRevision>, usize), NodeMapError> {
406 for (i, visit_item) in self.visit(prefix).enumerate() {
406 for (i, visit_item) in self.visit(prefix).enumerate() {
407 if let Some(opt) = visit_item.final_revision() {
407 if let Some(opt) = visit_item.final_revision() {
408 return Ok((opt, i + 1));
408 return Ok((opt, i + 1));
409 }
409 }
410 }
410 }
411 Err(NodeMapError::MultipleResults)
411 Err(NodeMapError::MultipleResults)
412 }
412 }
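    // Editor's example of the contract above, using the fixtures from the
    // tests below: with node 1234... at rev 0 and node 1a34... at rev 1,
    //
    //     lookup("12...") == Ok((Some(0.into()), 2)) // 2 nybbles suffice
    //     lookup("1b...") == Ok((None, 2))           // absent from the tree
    //     lookup("1...")  == Err(MultipleResults)    // both revs match "1"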
413
413
414 fn visit(&self, prefix: NodePrefix) -> NodeTreeVisitor {
414 fn visit(&self, prefix: NodePrefix) -> NodeTreeVisitor {
415 NodeTreeVisitor {
415 NodeTreeVisitor {
416 nt: self,
416 nt: self,
417 prefix,
417 prefix,
418 visit: self.len() - 1,
418 visit: self.len() - 1,
419 nybble_idx: 0,
419 nybble_idx: 0,
420 done: false,
420 done: false,
421 }
421 }
422 }
422 }
423 /// Return a mutable reference for `Block` at index `idx`.
423 /// Return a mutable reference for `Block` at index `idx`.
424 ///
424 ///
425 /// If `idx` lies in the immutable area, then the reference is to
425 /// If `idx` lies in the immutable area, then the reference is to
426 /// a newly appended copy.
426 /// a newly appended copy.
427 ///
427 ///
428 /// Returns `(new_idx, mut_ref, glen)` where
428 /// Returns `(new_idx, mut_ref, glen)` where
429 ///
429 ///
430 /// - `new_idx` is the index of the mutable `Block`
430 /// - `new_idx` is the index of the mutable `Block`
431 /// - `mut_ref` is a mutable reference to the mutable Block.
431 /// - `mut_ref` is a mutable reference to the mutable Block.
432 /// - `glen` is the new length of `self.growable`
432 /// - `glen` is the new length of `self.growable`
433 ///
433 ///
434 /// Note: the caller wouldn't be allowed to query `self.growable.len()`
434 /// Note: the caller wouldn't be allowed to query `self.growable.len()`
435 /// itself because of the mutable borrow taken with the returned `Block`
435 /// itself because of the mutable borrow taken with the returned `Block`
436 fn mutable_block(&mut self, idx: usize) -> (usize, &mut Block, usize) {
436 fn mutable_block(&mut self, idx: usize) -> (usize, &mut Block, usize) {
437 let ro_blocks = &self.readonly;
437 let ro_blocks = &self.readonly;
438 let ro_len = ro_blocks.len();
438 let ro_len = ro_blocks.len();
439 let glen = self.growable.len();
439 let glen = self.growable.len();
440 if idx < ro_len {
440 if idx < ro_len {
441 self.masked_inner_blocks += 1;
441 self.masked_inner_blocks += 1;
442 self.growable.push(ro_blocks[idx]);
442 self.growable.push(ro_blocks[idx]);
443 (glen + ro_len, &mut self.growable[glen], glen + 1)
443 (glen + ro_len, &mut self.growable[glen], glen + 1)
444 } else if glen + ro_len == idx {
444 } else if glen + ro_len == idx {
445 (idx, &mut self.root, glen)
445 (idx, &mut self.root, glen)
446 } else {
446 } else {
447 (idx, &mut self.growable[idx - ro_len], glen)
447 (idx, &mut self.growable[idx - ro_len], glen)
448 }
448 }
449 }
449 }
450
450
451 /// Main insertion method
451 /// Main insertion method
452 ///
452 ///
453 /// This will dive in the node tree to find the deepest `Block` for
453 /// This will dive in the node tree to find the deepest `Block` for
454 /// `node`, split it as much as needed and record `node` in there.
454 /// `node`, split it as much as needed and record `node` in there.
455 /// The method then backtracks, updating references in all the visited
455 /// The method then backtracks, updating references in all the visited
456 /// blocks from the root.
456 /// blocks from the root.
457 ///
457 ///
458 /// All the mutated `Block` are copied first to the growable part if
458 /// All the mutated `Block` are copied first to the growable part if
459 /// needed. That happens for those in the immutable part except the root.
459 /// needed. That happens for those in the immutable part except the root.
460 pub fn insert<I: RevlogIndex>(
460 pub fn insert<I: RevlogIndex>(
461 &mut self,
461 &mut self,
462 index: &I,
462 index: &I,
463 node: &Node,
463 node: &Node,
464 rev: Revision,
464 rev: Revision,
465 ) -> Result<(), NodeMapError> {
465 ) -> Result<(), NodeMapError> {
466 let ro_len = self.readonly.len();
466 let ro_len = self.readonly.len();
467
467
468 let mut visit_steps: Vec<_> = self.visit(node.into()).collect();
468 let mut visit_steps: Vec<_> = self.visit(node.into()).collect();
469 let read_nybbles = visit_steps.len();
469 let read_nybbles = visit_steps.len();
470 // visit_steps cannot be empty, since we always visit the root block
470 // visit_steps cannot be empty, since we always visit the root block
471 let deepest = visit_steps.pop().unwrap();
471 let deepest = visit_steps.pop().unwrap();
472
472
473 let (mut block_idx, mut block, mut glen) =
473 let (mut block_idx, mut block, mut glen) =
474 self.mutable_block(deepest.block_idx);
474 self.mutable_block(deepest.block_idx);
475
475
476 if let Element::Rev(old_rev) = deepest.element {
476 if let Element::Rev(old_rev) = deepest.element {
477 let old_node = index
477 let old_node = index
478 .check_revision(old_rev.into())
478 .check_revision(old_rev.into())
479 .and_then(|rev| index.node(rev))
479 .and_then(|rev| index.node(rev))
480 .ok_or_else(|| {
480 .ok_or_else(|| {
481 NodeMapError::RevisionNotInIndex(old_rev.into())
481 NodeMapError::RevisionNotInIndex(old_rev.into())
482 })?;
482 })?;
483 if old_node == node {
483 if old_node == node {
484 return Ok(()); // avoid creating lots of useless blocks
484 return Ok(()); // avoid creating lots of useless blocks
485 }
485 }
486
486
487 // Looping over the tail of nybbles in both nodes, creating
487 // Looping over the tail of nybbles in both nodes, creating
488 // new blocks until we find the difference
488 // new blocks until we find the difference
489 let mut new_block_idx = ro_len + glen;
489 let mut new_block_idx = ro_len + glen;
490 let mut nybble = deepest.nybble;
490 let mut nybble = deepest.nybble;
491 for nybble_pos in read_nybbles..node.nybbles_len() {
491 for nybble_pos in read_nybbles..node.nybbles_len() {
492 block.set(nybble, Element::Block(new_block_idx));
492 block.set(nybble, Element::Block(new_block_idx));
493
493
494 let new_nybble = node.get_nybble(nybble_pos);
494 let new_nybble = node.get_nybble(nybble_pos);
495 let old_nybble = old_node.get_nybble(nybble_pos);
495 let old_nybble = old_node.get_nybble(nybble_pos);
496
496
497 if old_nybble == new_nybble {
497 if old_nybble == new_nybble {
498 self.growable.push(Block::new());
498 self.growable.push(Block::new());
499 block = &mut self.growable[glen];
499 block = &mut self.growable[glen];
500 glen += 1;
500 glen += 1;
501 new_block_idx += 1;
501 new_block_idx += 1;
502 nybble = new_nybble;
502 nybble = new_nybble;
503 } else {
503 } else {
504 let mut new_block = Block::new();
504 let mut new_block = Block::new();
505 new_block.set(old_nybble, Element::Rev(old_rev));
505 new_block.set(old_nybble, Element::Rev(old_rev));
506 new_block.set(new_nybble, Element::Rev(rev.0));
506 new_block.set(new_nybble, Element::Rev(rev.0));
507 self.growable.push(new_block);
507 self.growable.push(new_block);
508 break;
508 break;
509 }
509 }
510 }
510 }
511 } else {
511 } else {
512 // Free slot in the deepest block: no splitting has to be done
512 // Free slot in the deepest block: no splitting has to be done
513 block.set(deepest.nybble, Element::Rev(rev.0));
513 block.set(deepest.nybble, Element::Rev(rev.0));
514 }
514 }
515
515
516 // Backtrack over visit steps to update references
516 // Backtrack over visit steps to update references
517 while let Some(visited) = visit_steps.pop() {
517 while let Some(visited) = visit_steps.pop() {
518 let to_write = Element::Block(block_idx);
518 let to_write = Element::Block(block_idx);
519 if visit_steps.is_empty() {
519 if visit_steps.is_empty() {
520 self.root.set(visited.nybble, to_write);
520 self.root.set(visited.nybble, to_write);
521 break;
521 break;
522 }
522 }
523 let (new_idx, block, _) = self.mutable_block(visited.block_idx);
523 let (new_idx, block, _) = self.mutable_block(visited.block_idx);
524 if block.get(visited.nybble) == to_write {
524 if block.get(visited.nybble) == to_write {
525 break;
525 break;
526 }
526 }
527 block.set(visited.nybble, to_write);
527 block.set(visited.nybble, to_write);
528 block_idx = new_idx;
528 block_idx = new_idx;
529 }
529 }
530 Ok(())
530 Ok(())
531 }
531 }
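    // Editor's trace of a split (mirrors test_insert_full_mutable below):
    // with only node 1234... recorded, root slot 1 holds Rev(0). Inserting
    // node 1a34... allocates one new block with slot 2 -> Rev(0) and
    // slot a -> Rev(1), and repoints root slot 1 at it; longer common
    // prefixes would add one block per shared nybble before the first
    // difference.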
532
532
533 /// Make the whole `NodeTree` logically empty, without touching the
533 /// Make the whole `NodeTree` logically empty, without touching the
534 /// immutable part.
534 /// immutable part.
535 pub fn invalidate_all(&mut self) {
535 pub fn invalidate_all(&mut self) {
536 self.root = Block::new();
536 self.root = Block::new();
537 self.growable = Vec::new();
537 self.growable = Vec::new();
538 self.masked_inner_blocks = self.readonly.len();
538 self.masked_inner_blocks = self.readonly.len();
539 }
539 }
540
540
541 /// Return the number of blocks in the readonly part that are currently
541 /// Return the number of blocks in the readonly part that are currently
542 /// masked in the mutable part.
542 /// masked in the mutable part.
543 ///
543 ///
544 /// The `NodeTree` structure has no efficient way to know how many blocks
544 /// The `NodeTree` structure has no efficient way to know how many blocks
545 /// are already unreachable in the readonly part.
545 /// are already unreachable in the readonly part.
546 ///
546 ///
547 /// After a call to `invalidate_all()`, the returned number can be actually
547 /// After a call to `invalidate_all()`, the returned number can be actually
548 /// bigger than the whole readonly part, a conventional way to mean that
548 /// bigger than the whole readonly part, a conventional way to mean that
549 /// all the readonly blocks have been masked. This is what is really
549 /// all the readonly blocks have been masked. This is what is really
550 /// useful to the caller and does not require to know how many were
550 /// useful to the caller and does not require to know how many were
551 /// actually unreachable to begin with.
551 /// actually unreachable to begin with.
552 pub fn masked_readonly_blocks(&self) -> usize {
552 pub fn masked_readonly_blocks(&self) -> usize {
553 if let Some(readonly_root) = self.readonly.last() {
553 if let Some(readonly_root) = self.readonly.last() {
554 if readonly_root == &self.root {
554 if readonly_root == &self.root {
555 return 0;
555 return 0;
556 }
556 }
557 } else {
557 } else {
558 return 0;
558 return 0;
559 }
559 }
560 self.masked_inner_blocks + 1
560 self.masked_inner_blocks + 1
561 }
561 }
562 }
562 }
563
563
564 pub struct NodeTreeBytes {
564 pub struct NodeTreeBytes {
565 buffer: Box<dyn Deref<Target = [u8]> + Send>,
565 buffer: Box<dyn Deref<Target = [u8]> + Send>,
566 len_in_blocks: usize,
566 len_in_blocks: usize,
567 }
567 }
568
568
569 impl NodeTreeBytes {
569 impl NodeTreeBytes {
570 fn new(
570 fn new(
571 buffer: Box<dyn Deref<Target = [u8]> + Send>,
571 buffer: Box<dyn Deref<Target = [u8]> + Send>,
572 amount: usize,
572 amount: usize,
573 ) -> Self {
573 ) -> Self {
574 assert!(buffer.len() >= amount);
574 assert!(buffer.len() >= amount);
575 let len_in_blocks = amount / size_of::<Block>();
575 let len_in_blocks = amount / size_of::<Block>();
576 NodeTreeBytes {
576 NodeTreeBytes {
577 buffer,
577 buffer,
578 len_in_blocks,
578 len_in_blocks,
579 }
579 }
580 }
580 }
581 }
581 }
582
582
583 impl Deref for NodeTreeBytes {
583 impl Deref for NodeTreeBytes {
584 type Target = [Block];
584 type Target = [Block];
585
585
586 fn deref(&self) -> &[Block] {
586 fn deref(&self) -> &[Block] {
587 Block::slice_from_bytes(&self.buffer, self.len_in_blocks)
587 Block::slice_from_bytes(&self.buffer, self.len_in_blocks)
588 // `NodeTreeBytes::new` already asserted that `self.buffer` is
588 // `NodeTreeBytes::new` already asserted that `self.buffer` is
589 // large enough.
589 // large enough.
590 .unwrap()
590 .unwrap()
591 .0
591 .0
592 }
592 }
593 }
593 }
594
594
595 struct NodeTreeVisitor<'n> {
595 struct NodeTreeVisitor<'n> {
596 nt: &'n NodeTree,
596 nt: &'n NodeTree,
597 prefix: NodePrefix,
597 prefix: NodePrefix,
598 visit: usize,
598 visit: usize,
599 nybble_idx: usize,
599 nybble_idx: usize,
600 done: bool,
600 done: bool,
601 }
601 }
602
602
603 #[derive(Debug, PartialEq, Clone)]
603 #[derive(Debug, PartialEq, Clone)]
604 struct NodeTreeVisitItem {
604 struct NodeTreeVisitItem {
605 block_idx: usize,
605 block_idx: usize,
606 nybble: u8,
606 nybble: u8,
607 element: Element,
607 element: Element,
608 }
608 }
609
609
610 impl<'n> Iterator for NodeTreeVisitor<'n> {
610 impl<'n> Iterator for NodeTreeVisitor<'n> {
611 type Item = NodeTreeVisitItem;
611 type Item = NodeTreeVisitItem;
612
612
613 fn next(&mut self) -> Option<Self::Item> {
613 fn next(&mut self) -> Option<Self::Item> {
614 if self.done || self.nybble_idx >= self.prefix.nybbles_len() {
614 if self.done || self.nybble_idx >= self.prefix.nybbles_len() {
615 return None;
615 return None;
616 }
616 }
617
617
618 let nybble = self.prefix.get_nybble(self.nybble_idx);
618 let nybble = self.prefix.get_nybble(self.nybble_idx);
619 self.nybble_idx += 1;
619 self.nybble_idx += 1;
620
620
621 let visit = self.visit;
621 let visit = self.visit;
622 let element = self.nt[visit].get(nybble);
622 let element = self.nt[visit].get(nybble);
623 if let Element::Block(idx) = element {
623 if let Element::Block(idx) = element {
624 self.visit = idx;
624 self.visit = idx;
625 } else {
625 } else {
626 self.done = true;
626 self.done = true;
627 }
627 }
628
628
629 Some(NodeTreeVisitItem {
629 Some(NodeTreeVisitItem {
630 block_idx: visit,
630 block_idx: visit,
631 nybble,
631 nybble,
632 element,
632 element,
633 })
633 })
634 }
634 }
635 }
635 }
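// Editor's note on the iteration above: the visitor yields one item per
// consumed nybble, descending while elements are `Block` references; the
// first `Rev` or `None` element sets `done` and ends the walk, so a walk
// takes at most `prefix.nybbles_len()` steps.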
636
636
637 impl NodeTreeVisitItem {
637 impl NodeTreeVisitItem {
638 // Return `Some(opt)` if this item is final, with `opt` being the
638 // Return `Some(opt)` if this item is final, with `opt` being the
639 // `UncheckedRevision` that it may represent.
639 // `UncheckedRevision` that it may represent.
640 //
640 //
641 // If the item is not terminal, return `None`
641 // If the item is not terminal, return `None`
642 fn final_revision(&self) -> Option<Option<UncheckedRevision>> {
642 fn final_revision(&self) -> Option<Option<UncheckedRevision>> {
643 match self.element {
643 match self.element {
644 Element::Block(_) => None,
644 Element::Block(_) => None,
645 Element::Rev(r) => Some(Some(r.into())),
645 Element::Rev(r) => Some(Some(r.into())),
646 Element::None => Some(None),
646 Element::None => Some(None),
647 }
647 }
648 }
648 }
649 }
649 }
650
650
651 impl From<Vec<Block>> for NodeTree {
651 impl From<Vec<Block>> for NodeTree {
652 fn from(vec: Vec<Block>) -> Self {
652 fn from(vec: Vec<Block>) -> Self {
653 Self::new(Box::new(vec))
653 Self::new(Box::new(vec))
654 }
654 }
655 }
655 }
656
656
657 impl fmt::Debug for NodeTree {
657 impl fmt::Debug for NodeTree {
658 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
658 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
659 let readonly: &[Block] = &*self.readonly;
659 let readonly: &[Block] = &*self.readonly;
660 write!(
660 write!(
661 f,
661 f,
662 "readonly: {:?}, growable: {:?}, root: {:?}",
662 "readonly: {:?}, growable: {:?}, root: {:?}",
663 readonly, self.growable, self.root
663 readonly, self.growable, self.root
664 )
664 )
665 }
665 }
666 }
666 }
667
667
668 impl Default for NodeTree {
668 impl Default for NodeTree {
669 /// Create a fully mutable empty NodeTree
669 /// Create a fully mutable empty NodeTree
670 fn default() -> Self {
670 fn default() -> Self {
671 NodeTree::new(Box::new(Vec::new()))
671 NodeTree::new(Box::new(Vec::new()))
672 }
672 }
673 }
673 }
674
674
675 impl NodeMap for NodeTree {
675 impl NodeMap for NodeTree {
676 fn find_bin<'a>(
676 fn find_bin<'a>(
677 &self,
677 &self,
678 idx: &impl RevlogIndex,
678 idx: &impl RevlogIndex,
679 prefix: NodePrefix,
679 prefix: NodePrefix,
680 ) -> Result<Option<Revision>, NodeMapError> {
680 ) -> Result<Option<Revision>, NodeMapError> {
681 validate_candidate(idx, prefix, self.lookup(prefix)?)
681 validate_candidate(idx, prefix, self.lookup(prefix)?)
682 .map(|(opt, _shortest)| opt)
682 .map(|(opt, _shortest)| opt)
683 }
683 }
684
684
685 fn unique_prefix_len_bin<'a>(
685 fn unique_prefix_len_bin<'a>(
686 &self,
686 &self,
687 idx: &impl RevlogIndex,
687 idx: &impl RevlogIndex,
688 prefix: NodePrefix,
688 prefix: NodePrefix,
689 ) -> Result<Option<usize>, NodeMapError> {
689 ) -> Result<Option<usize>, NodeMapError> {
690 validate_candidate(idx, prefix, self.lookup(prefix)?)
690 validate_candidate(idx, prefix, self.lookup(prefix)?)
691 .map(|(opt, shortest)| opt.map(|_rev| shortest))
691 .map(|(opt, shortest)| opt.map(|_rev| shortest))
692 }
692 }
693 }
693 }
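// Editor's sketch of end-to-end use (types as in the tests below; any
// `RevlogIndex` implementation can stand in for the test HashMap):
//
//     let mut nt = NodeTree::default();
//     nt.insert(&idx, &node, Revision(0))?;
//     assert_eq!(
//         nt.find_bin(&idx, NodePrefix::from_hex("12").unwrap())?,
//         Some(Revision(0))
//     );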
694
694
695 #[cfg(test)]
695 #[cfg(test)]
696 mod tests {
696 pub mod tests {
697 use super::NodeMapError::*;
697 use super::NodeMapError::*;
698 use super::*;
698 use super::*;
699 use crate::revlog::node::{hex_pad_right, Node};
699 use crate::revlog::node::{hex_pad_right, Node};
700 use std::collections::HashMap;
700 use std::collections::HashMap;
701
701
702 /// Creates a `Block` using a syntax close to the `Debug` output
702 /// Creates a `Block` using a syntax close to the `Debug` output
703 macro_rules! block {
703 macro_rules! block {
704 {$($nybble:tt : $variant:ident($val:tt)),*} => (
704 {$($nybble:tt : $variant:ident($val:tt)),*} => (
705 {
705 {
706 let mut block = Block::new();
706 let mut block = Block::new();
707 $(block.set($nybble, Element::$variant($val)));*;
707 $(block.set($nybble, Element::$variant($val)));*;
708 block
708 block
709 }
709 }
710 )
710 )
711 }
711 }
712
712
713 /// Shorthand to reduce boilerplate when creating [`Revision`] for testing
713 /// Shorthand to reduce boilerplate when creating [`Revision`] for testing
714 macro_rules! R {
714 macro_rules! R {
715 ($revision:literal) => {
715 ($revision:literal) => {
716 Revision($revision)
716 Revision($revision)
717 };
717 };
718 }
718 }
719
719
720 #[test]
720 #[test]
721 fn test_block_debug() {
721 fn test_block_debug() {
722 let mut block = Block::new();
722 let mut block = Block::new();
723 block.set(1, Element::Rev(3));
723 block.set(1, Element::Rev(3));
724 block.set(10, Element::Block(0));
724 block.set(10, Element::Block(0));
725 assert_eq!(format!("{:?}", block), "{1: Rev(3), 10: Block(0)}");
725 assert_eq!(format!("{:?}", block), "{1: Rev(3), 10: Block(0)}");
726 }
726 }
727
727
728 #[test]
728 #[test]
729 fn test_block_macro() {
729 fn test_block_macro() {
730 let block = block! {5: Block(2)};
730 let block = block! {5: Block(2)};
731 assert_eq!(format!("{:?}", block), "{5: Block(2)}");
731 assert_eq!(format!("{:?}", block), "{5: Block(2)}");
732
732
733 let block = block! {13: Rev(15), 5: Block(2)};
733 let block = block! {13: Rev(15), 5: Block(2)};
734 assert_eq!(format!("{:?}", block), "{5: Block(2), 13: Rev(15)}");
734 assert_eq!(format!("{:?}", block), "{5: Block(2), 13: Rev(15)}");
735 }
735 }
736
736
737 #[test]
737 #[test]
738 fn test_raw_block() {
738 fn test_raw_block() {
739 let mut raw = [255u8; 64];
739 let mut raw = [255u8; 64];
740
740
741 let mut counter = 0;
741 let mut counter = 0;
742 for val in [0_i32, 15, -2, -1, -3].iter() {
742 for val in [0_i32, 15, -2, -1, -3].iter() {
743 for byte in val.to_be_bytes().iter() {
743 for byte in val.to_be_bytes().iter() {
744 raw[counter] = *byte;
744 raw[counter] = *byte;
745 counter += 1;
745 counter += 1;
746 }
746 }
747 }
747 }
748 let (block, _) = Block::from_bytes(&raw).unwrap();
748 let (block, _) = Block::from_bytes(&raw).unwrap();
749 assert_eq!(block.get(0), Element::Block(0));
749 assert_eq!(block.get(0), Element::Block(0));
750 assert_eq!(block.get(1), Element::Block(15));
750 assert_eq!(block.get(1), Element::Block(15));
751 assert_eq!(block.get(3), Element::None);
751 assert_eq!(block.get(3), Element::None);
752 assert_eq!(block.get(2), Element::Rev(0));
752 assert_eq!(block.get(2), Element::Rev(0));
753 assert_eq!(block.get(4), Element::Rev(1));
753 assert_eq!(block.get(4), Element::Rev(1));
754 }
754 }
755
755
756 type TestIndex = HashMap<UncheckedRevision, Node>;
756 type TestIndex = HashMap<UncheckedRevision, Node>;
757
757
758 impl RevlogIndex for TestIndex {
758 impl RevlogIndex for TestIndex {
759 fn node(&self, rev: Revision) -> Option<&Node> {
759 fn node(&self, rev: Revision) -> Option<&Node> {
760 self.get(&rev.into())
760 self.get(&rev.into())
761 }
761 }
762
762
763 fn len(&self) -> usize {
763 fn len(&self) -> usize {
764 self.len()
764 self.len()
765 }
765 }
766
766
767 fn check_revision(&self, rev: UncheckedRevision) -> Option<Revision> {
767 fn check_revision(&self, rev: UncheckedRevision) -> Option<Revision> {
768 self.get(&rev).map(|_| Revision(rev.0))
768 self.get(&rev).map(|_| Revision(rev.0))
769 }
769 }
770 }
770 }
771
771
772 /// Pad hexadecimal Node prefix with zeros on the right
772 /// Pad hexadecimal Node prefix with zeros on the right
773 ///
773 ///
774 /// This avoids having to repeatedly write very long hexadecimal
774 /// This avoids having to repeatedly write very long hexadecimal
775 /// strings for test data, and brings actual hash size independency.
775 /// strings for test data, and brings actual hash size independency.
776 #[cfg(test)]
776 #[cfg(test)]
777 fn pad_node(hex: &str) -> Node {
777 fn pad_node(hex: &str) -> Node {
778 Node::from_hex(&hex_pad_right(hex)).unwrap()
778 Node::from_hex(&hex_pad_right(hex)).unwrap()
779 }
779 }
780
780
781 /// Pad hexadecimal Node prefix with zeros on the right, then insert
781 /// Pad hexadecimal Node prefix with zeros on the right, then insert
782 fn pad_insert(idx: &mut TestIndex, rev: Revision, hex: &str) {
782 fn pad_insert(idx: &mut TestIndex, rev: Revision, hex: &str) {
783 idx.insert(rev.into(), pad_node(hex));
783 idx.insert(rev.into(), pad_node(hex));
784 }
784 }
785
785
786 fn sample_nodetree() -> NodeTree {
786 fn sample_nodetree() -> NodeTree {
787 NodeTree::from(vec![
787 NodeTree::from(vec![
788 block![0: Rev(9)],
788 block![0: Rev(9)],
789 block![0: Rev(0), 1: Rev(9)],
789 block![0: Rev(0), 1: Rev(9)],
790 block![0: Block(1), 1:Rev(1)],
790 block![0: Block(1), 1:Rev(1)],
791 ])
791 ])
792 }
792 }
793
793
794 fn hex(s: &str) -> NodePrefix {
794 fn hex(s: &str) -> NodePrefix {
795 NodePrefix::from_hex(s).unwrap()
795 NodePrefix::from_hex(s).unwrap()
796 }
796 }
797
797
798 #[test]
798 #[test]
799 fn test_nt_debug() {
799 fn test_nt_debug() {
800 let nt = sample_nodetree();
800 let nt = sample_nodetree();
801 assert_eq!(
801 assert_eq!(
802 format!("{:?}", nt),
802 format!("{:?}", nt),
803 "readonly: \
803 "readonly: \
804 [{0: Rev(9)}, {0: Rev(0), 1: Rev(9)}, {0: Block(1), 1: Rev(1)}], \
804 [{0: Rev(9)}, {0: Rev(0), 1: Rev(9)}, {0: Block(1), 1: Rev(1)}], \
805 growable: [], \
805 growable: [], \
806 root: {0: Block(1), 1: Rev(1)}",
806 root: {0: Block(1), 1: Rev(1)}",
807 );
807 );
808 }
808 }
809
809
810 #[test]
810 #[test]
811 fn test_immutable_find_simplest() -> Result<(), NodeMapError> {
811 fn test_immutable_find_simplest() -> Result<(), NodeMapError> {
812 let mut idx: TestIndex = HashMap::new();
812 let mut idx: TestIndex = HashMap::new();
813 pad_insert(&mut idx, R!(1), "1234deadcafe");
813 pad_insert(&mut idx, R!(1), "1234deadcafe");
814
814
815 let nt = NodeTree::from(vec![block! {1: Rev(1)}]);
815 let nt = NodeTree::from(vec![block! {1: Rev(1)}]);
816 assert_eq!(nt.find_bin(&idx, hex("1"))?, Some(R!(1)));
816 assert_eq!(nt.find_bin(&idx, hex("1"))?, Some(R!(1)));
817 assert_eq!(nt.find_bin(&idx, hex("12"))?, Some(R!(1)));
817 assert_eq!(nt.find_bin(&idx, hex("12"))?, Some(R!(1)));
818 assert_eq!(nt.find_bin(&idx, hex("1234de"))?, Some(R!(1)));
818 assert_eq!(nt.find_bin(&idx, hex("1234de"))?, Some(R!(1)));
819 assert_eq!(nt.find_bin(&idx, hex("1a"))?, None);
819 assert_eq!(nt.find_bin(&idx, hex("1a"))?, None);
820 assert_eq!(nt.find_bin(&idx, hex("ab"))?, None);
820 assert_eq!(nt.find_bin(&idx, hex("ab"))?, None);
821
821
822 // and with full binary Nodes
822 // and with full binary Nodes
823 assert_eq!(
823 assert_eq!(
824 nt.find_node(&idx, idx.get(&1.into()).unwrap())?,
824 nt.find_node(&idx, idx.get(&1.into()).unwrap())?,
825 Some(R!(1))
825 Some(R!(1))
826 );
826 );
827 let unknown = Node::from_hex(&hex_pad_right("3d")).unwrap();
827 let unknown = Node::from_hex(&hex_pad_right("3d")).unwrap();
828 assert_eq!(nt.find_node(&idx, &unknown)?, None);
828 assert_eq!(nt.find_node(&idx, &unknown)?, None);
829 Ok(())
829 Ok(())
830 }
830 }
831
831
832 #[test]
832 #[test]
833 fn test_immutable_find_one_jump() {
833 fn test_immutable_find_one_jump() {
834 let mut idx = TestIndex::new();
834 let mut idx = TestIndex::new();
835 pad_insert(&mut idx, R!(9), "012");
835 pad_insert(&mut idx, R!(9), "012");
836 pad_insert(&mut idx, R!(0), "00a");
836 pad_insert(&mut idx, R!(0), "00a");
837
837
838 let nt = sample_nodetree();
838 let nt = sample_nodetree();
839
839
840 assert_eq!(nt.find_bin(&idx, hex("0")), Err(MultipleResults));
840 assert_eq!(nt.find_bin(&idx, hex("0")), Err(MultipleResults));
841 assert_eq!(nt.find_bin(&idx, hex("01")), Ok(Some(R!(9))));
841 assert_eq!(nt.find_bin(&idx, hex("01")), Ok(Some(R!(9))));
842 assert_eq!(nt.find_bin(&idx, hex("00")), Err(MultipleResults));
842 assert_eq!(nt.find_bin(&idx, hex("00")), Err(MultipleResults));
843 assert_eq!(nt.find_bin(&idx, hex("00a")), Ok(Some(R!(0))));
843 assert_eq!(nt.find_bin(&idx, hex("00a")), Ok(Some(R!(0))));
844 assert_eq!(nt.unique_prefix_len_bin(&idx, hex("00a")), Ok(Some(3)));
844 assert_eq!(nt.unique_prefix_len_bin(&idx, hex("00a")), Ok(Some(3)));
845 assert_eq!(nt.find_bin(&idx, hex("000")), Ok(Some(NULL_REVISION)));
845 assert_eq!(nt.find_bin(&idx, hex("000")), Ok(Some(NULL_REVISION)));
846 }
846 }
847
847
848 #[test]
848 #[test]
849 fn test_mutated_find() -> Result<(), NodeMapError> {
849 fn test_mutated_find() -> Result<(), NodeMapError> {
850 let mut idx = TestIndex::new();
850 let mut idx = TestIndex::new();
851 pad_insert(&mut idx, R!(9), "012");
851 pad_insert(&mut idx, R!(9), "012");
852 pad_insert(&mut idx, R!(0), "00a");
852 pad_insert(&mut idx, R!(0), "00a");
853 pad_insert(&mut idx, R!(2), "cafe");
853 pad_insert(&mut idx, R!(2), "cafe");
854 pad_insert(&mut idx, R!(3), "15");
854 pad_insert(&mut idx, R!(3), "15");
855 pad_insert(&mut idx, R!(1), "10");
855 pad_insert(&mut idx, R!(1), "10");
856
856
857 let nt = NodeTree {
857 let nt = NodeTree {
858 readonly: sample_nodetree().readonly,
858 readonly: sample_nodetree().readonly,
859 growable: vec![block![0: Rev(1), 5: Rev(3)]],
859 growable: vec![block![0: Rev(1), 5: Rev(3)]],
860 root: block![0: Block(1), 1:Block(3), 12: Rev(2)],
860 root: block![0: Block(1), 1:Block(3), 12: Rev(2)],
861 masked_inner_blocks: 1,
861 masked_inner_blocks: 1,
862 };
862 };
863 assert_eq!(nt.find_bin(&idx, hex("10"))?, Some(R!(1)));
863 assert_eq!(nt.find_bin(&idx, hex("10"))?, Some(R!(1)));
864 assert_eq!(nt.find_bin(&idx, hex("c"))?, Some(R!(2)));
864 assert_eq!(nt.find_bin(&idx, hex("c"))?, Some(R!(2)));
865 assert_eq!(nt.unique_prefix_len_bin(&idx, hex("c"))?, Some(1));
865 assert_eq!(nt.unique_prefix_len_bin(&idx, hex("c"))?, Some(1));
866 assert_eq!(nt.find_bin(&idx, hex("00")), Err(MultipleResults));
866 assert_eq!(nt.find_bin(&idx, hex("00")), Err(MultipleResults));
867 assert_eq!(nt.find_bin(&idx, hex("000"))?, Some(NULL_REVISION));
867 assert_eq!(nt.find_bin(&idx, hex("000"))?, Some(NULL_REVISION));
868 assert_eq!(nt.unique_prefix_len_bin(&idx, hex("000"))?, Some(3));
868 assert_eq!(nt.unique_prefix_len_bin(&idx, hex("000"))?, Some(3));
869 assert_eq!(nt.find_bin(&idx, hex("01"))?, Some(R!(9)));
869 assert_eq!(nt.find_bin(&idx, hex("01"))?, Some(R!(9)));
870 assert_eq!(nt.masked_readonly_blocks(), 2);
870 assert_eq!(nt.masked_readonly_blocks(), 2);
871 Ok(())
871 Ok(())
872 }
872 }
873
873
874 struct TestNtIndex {
874 pub struct TestNtIndex {
875 index: TestIndex,
875 pub index: TestIndex,
876 nt: NodeTree,
876 pub nt: NodeTree,
877 }
877 }
878
878
879 impl TestNtIndex {
879 impl TestNtIndex {
880 fn new() -> Self {
880 pub fn new() -> Self {
881 TestNtIndex {
881 TestNtIndex {
882 index: HashMap::new(),
882 index: HashMap::new(),
883 nt: NodeTree::default(),
883 nt: NodeTree::default(),
884 }
884 }
885 }
885 }
886
886
887 fn insert(&mut self, rev: i32, hex: &str) -> Result<(), NodeMapError> {
887 pub fn insert_node(
888 &mut self,
889 rev: Revision,
890 node: Node,
891 ) -> Result<(), NodeMapError> {
892 self.index.insert(rev.into(), node);
893 self.nt.insert(&self.index, &node, rev)?;
894 Ok(())
895 }
896
897 pub fn insert(
898 &mut self,
899 rev: Revision,
900 hex: &str,
901 ) -> Result<(), NodeMapError> {
888 let node = pad_node(hex);
902 let node = pad_node(hex);
889 let rev: UncheckedRevision = rev.into();
903 return self.insert_node(rev, node);
890 self.index.insert(rev, node);
891 self.nt.insert(
892 &self.index,
893 &node,
894 self.index.check_revision(rev).unwrap(),
895 )?;
896 Ok(())
897 }
904 }
898
905
899 fn find_hex(
906 fn find_hex(
900 &self,
907 &self,
901 prefix: &str,
908 prefix: &str,
902 ) -> Result<Option<Revision>, NodeMapError> {
909 ) -> Result<Option<Revision>, NodeMapError> {
903 self.nt.find_bin(&self.index, hex(prefix))
910 self.nt.find_bin(&self.index, hex(prefix))
904 }
911 }
905
912
906 fn unique_prefix_len_hex(
913 fn unique_prefix_len_hex(
907 &self,
914 &self,
908 prefix: &str,
915 prefix: &str,
909 ) -> Result<Option<usize>, NodeMapError> {
916 ) -> Result<Option<usize>, NodeMapError> {
910 self.nt.unique_prefix_len_bin(&self.index, hex(prefix))
917 self.nt.unique_prefix_len_bin(&self.index, hex(prefix))
911 }
918 }
912
919
913 /// Drain `added` and restart a new one
920 /// Drain `added` and restart a new one
914 fn commit(self) -> Self {
921 fn commit(self) -> Self {
915 let mut as_vec: Vec<Block> =
922 let mut as_vec: Vec<Block> =
916 self.nt.readonly.iter().copied().collect();
923 self.nt.readonly.iter().copied().collect();
917 as_vec.extend(self.nt.growable);
924 as_vec.extend(self.nt.growable);
918 as_vec.push(self.nt.root);
925 as_vec.push(self.nt.root);
919
926
920 Self {
927 Self {
921 index: self.index,
928 index: self.index,
922 nt: NodeTree::from(as_vec),
929 nt: NodeTree::from(as_vec),
923 }
930 }
924 }
931 }
925 }
932 }
926
933
927 #[test]
934 #[test]
928 fn test_insert_full_mutable() -> Result<(), NodeMapError> {
935 fn test_insert_full_mutable() -> Result<(), NodeMapError> {
929 let mut idx = TestNtIndex::new();
936 let mut idx = TestNtIndex::new();
930 idx.insert(0, "1234")?;
937 idx.insert(Revision(0), "1234")?;
931 assert_eq!(idx.find_hex("1")?, Some(R!(0)));
938 assert_eq!(idx.find_hex("1")?, Some(R!(0)));
932 assert_eq!(idx.find_hex("12")?, Some(R!(0)));
939 assert_eq!(idx.find_hex("12")?, Some(R!(0)));
933
940
934 // let's trigger a simple split
941 // let's trigger a simple split
935 idx.insert(1, "1a34")?;
942 idx.insert(Revision(1), "1a34")?;
936 assert_eq!(idx.nt.growable.len(), 1);
943 assert_eq!(idx.nt.growable.len(), 1);
937 assert_eq!(idx.find_hex("12")?, Some(R!(0)));
944 assert_eq!(idx.find_hex("12")?, Some(R!(0)));
938 assert_eq!(idx.find_hex("1a")?, Some(R!(1)));
945 assert_eq!(idx.find_hex("1a")?, Some(R!(1)));
939
946
940 // reinserting is a no-op
947 // reinserting is a no-op
941 idx.insert(1, "1a34")?;
948 idx.insert(Revision(1), "1a34")?;
942 assert_eq!(idx.nt.growable.len(), 1);
949 assert_eq!(idx.nt.growable.len(), 1);
943 assert_eq!(idx.find_hex("12")?, Some(R!(0)));
950 assert_eq!(idx.find_hex("12")?, Some(R!(0)));
944 assert_eq!(idx.find_hex("1a")?, Some(R!(1)));
951 assert_eq!(idx.find_hex("1a")?, Some(R!(1)));
945
952
946 idx.insert(2, "1a01")?;
953 idx.insert(Revision(2), "1a01")?;
947 assert_eq!(idx.nt.growable.len(), 2);
954 assert_eq!(idx.nt.growable.len(), 2);
948 assert_eq!(idx.find_hex("1a"), Err(NodeMapError::MultipleResults));
955 assert_eq!(idx.find_hex("1a"), Err(NodeMapError::MultipleResults));
949 assert_eq!(idx.find_hex("12")?, Some(R!(0)));
956 assert_eq!(idx.find_hex("12")?, Some(R!(0)));
950 assert_eq!(idx.find_hex("1a3")?, Some(R!(1)));
957 assert_eq!(idx.find_hex("1a3")?, Some(R!(1)));
951 assert_eq!(idx.find_hex("1a0")?, Some(R!(2)));
958 assert_eq!(idx.find_hex("1a0")?, Some(R!(2)));
952 assert_eq!(idx.find_hex("1a12")?, None);
959 assert_eq!(idx.find_hex("1a12")?, None);
953
960
954 // now let's make it split and create more than one additional block
961 // now let's make it split and create more than one additional block
955 idx.insert(3, "1a345")?;
962 idx.insert(Revision(3), "1a345")?;
956 assert_eq!(idx.nt.growable.len(), 4);
963 assert_eq!(idx.nt.growable.len(), 4);
957 assert_eq!(idx.find_hex("1a340")?, Some(R!(1)));
964 assert_eq!(idx.find_hex("1a340")?, Some(R!(1)));
958 assert_eq!(idx.find_hex("1a345")?, Some(R!(3)));
965 assert_eq!(idx.find_hex("1a345")?, Some(R!(3)));
959 assert_eq!(idx.find_hex("1a341")?, None);
966 assert_eq!(idx.find_hex("1a341")?, None);
960
967
961 // there's no readonly block to mask
968 // there's no readonly block to mask
962 assert_eq!(idx.nt.masked_readonly_blocks(), 0);
969 assert_eq!(idx.nt.masked_readonly_blocks(), 0);
963 Ok(())
970 Ok(())
964 }
971 }
965
972
966 #[test]
973 #[test]
967 fn test_unique_prefix_len_zero_prefix() {
974 fn test_unique_prefix_len_zero_prefix() {
968 let mut idx = TestNtIndex::new();
975 let mut idx = TestNtIndex::new();
969 idx.insert(0, "00000abcd").unwrap();
976 idx.insert(Revision(0), "00000abcd").unwrap();
970
977
971 assert_eq!(idx.find_hex("000"), Err(NodeMapError::MultipleResults));
978 assert_eq!(idx.find_hex("000"), Err(NodeMapError::MultipleResults));
972 // in the nodetree proper, this will be found at the first nybble
979 // in the nodetree proper, this will be found at the first nybble
973 // yet the correct answer for unique_prefix_len is not 1, nor 1+1,
980 // yet the correct answer for unique_prefix_len is not 1, nor 1+1,
974 // but the first difference with `NULL_NODE`
981 // but the first difference with `NULL_NODE`
975 assert_eq!(idx.unique_prefix_len_hex("00000a"), Ok(Some(6)));
982 assert_eq!(idx.unique_prefix_len_hex("00000a"), Ok(Some(6)));
976 assert_eq!(idx.unique_prefix_len_hex("00000ab"), Ok(Some(6)));
983 assert_eq!(idx.unique_prefix_len_hex("00000ab"), Ok(Some(6)));
977
984
978 // same with odd result
985 // same with odd result
979 idx.insert(1, "00123").unwrap();
986 idx.insert(Revision(1), "00123").unwrap();
980 assert_eq!(idx.unique_prefix_len_hex("001"), Ok(Some(3)));
987 assert_eq!(idx.unique_prefix_len_hex("001"), Ok(Some(3)));
981 assert_eq!(idx.unique_prefix_len_hex("0012"), Ok(Some(3)));
988 assert_eq!(idx.unique_prefix_len_hex("0012"), Ok(Some(3)));
982
989
983 // these are unchanged of course
990 // these are unchanged of course
984 assert_eq!(idx.unique_prefix_len_hex("00000a"), Ok(Some(6)));
991 assert_eq!(idx.unique_prefix_len_hex("00000a"), Ok(Some(6)));
985 assert_eq!(idx.unique_prefix_len_hex("00000ab"), Ok(Some(6)));
992 assert_eq!(idx.unique_prefix_len_hex("00000ab"), Ok(Some(6)));
986 }
993 }
987
994
988 #[test]
995 #[test]
989 fn test_insert_extreme_splitting() -> Result<(), NodeMapError> {
996 fn test_insert_extreme_splitting() -> Result<(), NodeMapError> {
990 // check that the splitting loop is long enough
997 // check that the splitting loop is long enough
991 let mut nt_idx = TestNtIndex::new();
998 let mut nt_idx = TestNtIndex::new();
992 let nt = &mut nt_idx.nt;
999 let nt = &mut nt_idx.nt;
993 let idx = &mut nt_idx.index;
1000 let idx = &mut nt_idx.index;
994
1001
995 let node0_hex = hex_pad_right("444444");
1002 let node0_hex = hex_pad_right("444444");
996 let mut node1_hex = hex_pad_right("444444");
1003 let mut node1_hex = hex_pad_right("444444");
997 node1_hex.pop();
1004 node1_hex.pop();
998 node1_hex.push('5');
1005 node1_hex.push('5');
999 let node0 = Node::from_hex(&node0_hex).unwrap();
1006 let node0 = Node::from_hex(&node0_hex).unwrap();
1000 let node1 = Node::from_hex(&node1_hex).unwrap();
1007 let node1 = Node::from_hex(&node1_hex).unwrap();
1001
1008
1002 idx.insert(0.into(), node0);
1009 idx.insert(0.into(), node0);
1003 nt.insert(idx, &node0, R!(0))?;
1010 nt.insert(idx, &node0, R!(0))?;
1004 idx.insert(1.into(), node1);
1011 idx.insert(1.into(), node1);
1005 nt.insert(idx, &node1, R!(1))?;
1012 nt.insert(idx, &node1, R!(1))?;
1006
1013
1007 assert_eq!(nt.find_bin(idx, (&node0).into())?, Some(R!(0)));
1014 assert_eq!(nt.find_bin(idx, (&node0).into())?, Some(R!(0)));
1008 assert_eq!(nt.find_bin(idx, (&node1).into())?, Some(R!(1)));
1015 assert_eq!(nt.find_bin(idx, (&node1).into())?, Some(R!(1)));
1009 Ok(())
1016 Ok(())
1010 }
1017 }
1011
1018
1012 #[test]
1019 #[test]
1013 fn test_insert_partly_immutable() -> Result<(), NodeMapError> {
1020 fn test_insert_partly_immutable() -> Result<(), NodeMapError> {
1014 let mut idx = TestNtIndex::new();
1021 let mut idx = TestNtIndex::new();
1015 idx.insert(0, "1234")?;
1022 idx.insert(Revision(0), "1234")?;
1016 idx.insert(1, "1235")?;
1023 idx.insert(Revision(1), "1235")?;
1017 idx.insert(2, "131")?;
1024 idx.insert(Revision(2), "131")?;
1018 idx.insert(3, "cafe")?;
1025 idx.insert(Revision(3), "cafe")?;
1019 let mut idx = idx.commit();
1026 let mut idx = idx.commit();
1020 assert_eq!(idx.find_hex("1234")?, Some(R!(0)));
1027 assert_eq!(idx.find_hex("1234")?, Some(R!(0)));
1021 assert_eq!(idx.find_hex("1235")?, Some(R!(1)));
1028 assert_eq!(idx.find_hex("1235")?, Some(R!(1)));
1022 assert_eq!(idx.find_hex("131")?, Some(R!(2)));
1029 assert_eq!(idx.find_hex("131")?, Some(R!(2)));
1023 assert_eq!(idx.find_hex("cafe")?, Some(R!(3)));
1030 assert_eq!(idx.find_hex("cafe")?, Some(R!(3)));
1024 // we did not add anything since init from readonly
1031 // we did not add anything since init from readonly
1025 assert_eq!(idx.nt.masked_readonly_blocks(), 0);
1032 assert_eq!(idx.nt.masked_readonly_blocks(), 0);
1026
1033
1027 idx.insert(4, "123A")?;
1034 idx.insert(Revision(4), "123A")?;
1028 assert_eq!(idx.find_hex("1234")?, Some(R!(0)));
1035 assert_eq!(idx.find_hex("1234")?, Some(R!(0)));
1029 assert_eq!(idx.find_hex("1235")?, Some(R!(1)));
1036 assert_eq!(idx.find_hex("1235")?, Some(R!(1)));
1030 assert_eq!(idx.find_hex("131")?, Some(R!(2)));
1037 assert_eq!(idx.find_hex("131")?, Some(R!(2)));
1031 assert_eq!(idx.find_hex("cafe")?, Some(R!(3)));
1038 assert_eq!(idx.find_hex("cafe")?, Some(R!(3)));
1032 assert_eq!(idx.find_hex("123A")?, Some(R!(4)));
1039 assert_eq!(idx.find_hex("123A")?, Some(R!(4)));
1033 // we masked blocks for all prefixes of "123", including the root
1040 // we masked blocks for all prefixes of "123", including the root
1034 assert_eq!(idx.nt.masked_readonly_blocks(), 4);
1041 assert_eq!(idx.nt.masked_readonly_blocks(), 4);
1035
1042
1036 eprintln!("{:?}", idx.nt);
1043 eprintln!("{:?}", idx.nt);
1037 idx.insert(5, "c0")?;
1044 idx.insert(Revision(5), "c0")?;
1038 assert_eq!(idx.find_hex("cafe")?, Some(R!(3)));
1045 assert_eq!(idx.find_hex("cafe")?, Some(R!(3)));
1039 assert_eq!(idx.find_hex("c0")?, Some(R!(5)));
1046 assert_eq!(idx.find_hex("c0")?, Some(R!(5)));
1040 assert_eq!(idx.find_hex("c1")?, None);
1047 assert_eq!(idx.find_hex("c1")?, None);
1041 assert_eq!(idx.find_hex("1234")?, Some(R!(0)));
1048 assert_eq!(idx.find_hex("1234")?, Some(R!(0)));
1042 // inserting "c0" is just splitting the 'c' slot of the mutable root,
1049 // inserting "c0" is just splitting the 'c' slot of the mutable root,
1043 // it doesn't mask anything
1050 // it doesn't mask anything
1044 assert_eq!(idx.nt.masked_readonly_blocks(), 4);
1051 assert_eq!(idx.nt.masked_readonly_blocks(), 4);
1045
1052
1046 Ok(())
1053 Ok(())
1047 }
1054 }
1048
1055
1049 #[test]
1056 #[test]
1050 fn test_invalidate_all() -> Result<(), NodeMapError> {
1057 fn test_invalidate_all() -> Result<(), NodeMapError> {
1051 let mut idx = TestNtIndex::new();
1058 let mut idx = TestNtIndex::new();
1052 idx.insert(0, "1234")?;
1059 idx.insert(Revision(0), "1234")?;
1053 idx.insert(1, "1235")?;
1060 idx.insert(Revision(1), "1235")?;
1054 idx.insert(2, "131")?;
1061 idx.insert(Revision(2), "131")?;
1055 idx.insert(3, "cafe")?;
1062 idx.insert(Revision(3), "cafe")?;
1056 let mut idx = idx.commit();
1063 let mut idx = idx.commit();
1057
1064
1058 idx.nt.invalidate_all();
1065 idx.nt.invalidate_all();
1059
1066
1060 assert_eq!(idx.find_hex("1234")?, None);
1067 assert_eq!(idx.find_hex("1234")?, None);
1061 assert_eq!(idx.find_hex("1235")?, None);
1068 assert_eq!(idx.find_hex("1235")?, None);
1062 assert_eq!(idx.find_hex("131")?, None);
1069 assert_eq!(idx.find_hex("131")?, None);
1063 assert_eq!(idx.find_hex("cafe")?, None);
1070 assert_eq!(idx.find_hex("cafe")?, None);
1064 // all the readonly blocks have been masked, this is the
1071 // all the readonly blocks have been masked, this is the
1065 // conventional expected response
1072 // conventional expected response
1066 assert_eq!(idx.nt.masked_readonly_blocks(), idx.nt.readonly.len() + 1);
1073 assert_eq!(idx.nt.masked_readonly_blocks(), idx.nt.readonly.len() + 1);
1067 Ok(())
1074 Ok(())
1068 }
1075 }
1069
1076
1070 #[test]
1077 #[test]
1071 fn test_into_added_empty() {
1078 fn test_into_added_empty() {
1072 assert!(sample_nodetree().into_readonly_and_added().1.is_empty());
1079 assert!(sample_nodetree().into_readonly_and_added().1.is_empty());
1073 assert!(sample_nodetree()
1080 assert!(sample_nodetree()
1074 .into_readonly_and_added_bytes()
1081 .into_readonly_and_added_bytes()
1075 .1
1082 .1
1076 .is_empty());
1083 .is_empty());
1077 }
1084 }
1078
1085
1079 #[test]
1086 #[test]
1080 fn test_into_added_bytes() -> Result<(), NodeMapError> {
1087 fn test_into_added_bytes() -> Result<(), NodeMapError> {
1081 let mut idx = TestNtIndex::new();
1088 let mut idx = TestNtIndex::new();
1082 idx.insert(0, "1234")?;
1089 idx.insert(Revision(0), "1234")?;
1083 let mut idx = idx.commit();
1090 let mut idx = idx.commit();
1084 idx.insert(4, "cafe")?;
1091 idx.insert(Revision(4), "cafe")?;
1085 let (_, bytes) = idx.nt.into_readonly_and_added_bytes();
1092 let (_, bytes) = idx.nt.into_readonly_and_added_bytes();
1086
1093
1087 // only the root block has been changed
1094 // only the root block has been changed
1088 assert_eq!(bytes.len(), size_of::<Block>());
1095 assert_eq!(bytes.len(), size_of::<Block>());
1089 // big endian for -2
1096 // big endian for -2
1090 assert_eq!(&bytes[4..2 * 4], [255, 255, 255, 254]);
1097 assert_eq!(&bytes[4..2 * 4], [255, 255, 255, 254]);
1091 // big endian for -6
1098 // big endian for -6
1092 assert_eq!(&bytes[12 * 4..13 * 4], [255, 255, 255, 250]);
1099 assert_eq!(&bytes[12 * 4..13 * 4], [255, 255, 255, 250]);
1093 Ok(())
1100 Ok(())
1094 }
1101 }
1095 }
1102 }
@@ -1,483 +1,483 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
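With "stub=on" each benchmark runs only once and "presleep=0" removes the
pre-run wait, so the commands below execute quickly: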
  $ hg help -e perf
  perf extension - helper extension to measure performance

  Configurations
  ==============

  "perf"
  ------

  "all-timing"
      When set, additional statistics will be reported for each benchmark: best,
      worst, median average. If not set only the best timing is reported
      (default: off).

  "presleep"
      number of second to wait before any group of runs (default: 1)

  "pre-run"
      number of run to perform before starting measurement.

  "profile-benchmark"
      Enable profiling for the benchmarked section. (The first iteration is
      benchmarked)

  "run-limits"
      Control the number of runs each benchmark will perform. The option value
      should be a list of '<time>-<numberofrun>' pairs. After each run the
      conditions are considered in order with the following logic:

      If benchmark has been running for <time> seconds, and we have performed
      <numberofrun> iterations, stop the benchmark,

      The default value is: '3.0-100, 10.0-3'

  "stub"
      When set, benchmarks will only be run once, useful for testing (default:
      off)

  list of commands:

   perf::addremove
                 (no help text available)
   perf::ancestors
                 (no help text available)
   perf::ancestorset
                 (no help text available)
   perf::annotate
                 (no help text available)
   perf::bdiff   benchmark a bdiff between revisions
   perf::bookmarks
                 benchmark parsing bookmarks from disk to memory
   perf::branchmap
                 benchmark the update of a branchmap
   perf::branchmapload
                 benchmark reading the branchmap
   perf::branchmapupdate
                 benchmark branchmap update from for <base> revs to <target>
                 revs
   perf::bundle  benchmark the creation of a bundle from a repository
   perf::bundleread
                 Benchmark reading of bundle files.
   perf::cca     (no help text available)
   perf::changegroupchangelog
                 Benchmark producing a changelog group for a changegroup.
   perf::changeset
                 (no help text available)
   perf::ctxfiles
                 (no help text available)
   perf::delta-find
                 benchmark the process of finding a valid delta for a revlog
                 revision
   perf::diffwd  Profile diff of working directory changes
   perf::dirfoldmap
                 benchmap a 'dirstate._map.dirfoldmap.get()' request
   perf::dirs    (no help text available)
   perf::dirstate
                 benchmap the time of various distate operations
   perf::dirstatedirs
                 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
   perf::dirstatefoldmap
                 benchmap a 'dirstate._map.filefoldmap.get()' request
   perf::dirstatewrite
                 benchmap the time it take to write a dirstate on disk
   perf::discovery
                 benchmark discovery between local repo and the peer at given
                 path
   perf::fncacheencode
                 (no help text available)
   perf::fncacheload
                 (no help text available)
   perf::fncachewrite
                 (no help text available)
   perf::heads   benchmark the computation of a changelog heads
   perf::helper-mergecopies
                 find statistics about potential parameters for
                 'perfmergecopies'
   perf::helper-pathcopies
                 find statistic about potential parameters for the
                 'perftracecopies'
   perf::ignore  benchmark operation related to computing ignore
   perf::index   benchmark index creation time followed by a lookup
   perf::linelogedits
                 (no help text available)
   perf::loadmarkers
                 benchmark the time to parse the on-disk markers for a repo
   perf::log     (no help text available)
   perf::lookup  (no help text available)
   perf::lrucachedict
                 (no help text available)
   perf::manifest
                 benchmark the time to read a manifest from disk and return a
                 usable
   perf::mergecalculate
                 (no help text available)
   perf::mergecopies
                 measure runtime of 'copies.mergecopies'
   perf::moonwalk
                 benchmark walking the changelog backwards
   perf::nodelookup
                 (no help text available)
   perf::nodemap
                 benchmark the time necessary to look up revision from a cold
                 nodemap
   perf::parents
                 benchmark the time necessary to fetch one changeset's parents.
   perf::pathcopies
                 benchmark the copy tracing logic
   perf::phases  benchmark phasesets computation
   perf::phasesremote
                 benchmark time needed to analyse phases of the remote server
   perf::progress
                 printing of progress bars
   perf::rawfiles
                 (no help text available)
   perf::revlogchunks
                 Benchmark operations on revlog chunks.
   perf::revlogindex
                 Benchmark operations against a revlog index.
   perf::revlogrevision
                 Benchmark obtaining a revlog revision.
   perf::revlogrevisions
                 Benchmark reading a series of revisions from a revlog.
   perf::revlogwrite
                 Benchmark writing a series of revisions to a revlog.
   perf::revrange
                 (no help text available)
   perf::revset  benchmark the execution time of a revset
   perf::startup
                 (no help text available)
   perf::status  benchmark the performance of a single status call
   perf::stream-consume
                 benchmark the full application of a stream clone
   perf::stream-generate
                 benchmark the full generation of a stream clone
   perf::stream-locked-section
                 benchmark the initial, repo-locked, section of a stream-clone
   perf::tags    Benchmark tags retrieval in various situation
   perf::templating
                 test the rendering time of a given template
   perf::unbundle
                 benchmark application of a bundle in a repository.
   perf::unidiff
                 benchmark a unified diff between revisions
   perf::volatilesets
                 benchmark the computation of various volatile set
   perf::walk    (no help text available)
   perf::write   microbenchmark ui.write (and others)

  (use 'hg help -v perf' to show built-in aliases and global options)

  $ hg help perfaddremove
  hg perf::addremove

  aliases: perfaddremove

  (no help text available)

  options:

   -T --template TEMPLATE display with template

  (some details hidden, use --verbose to show complete help)

  $ hg perfaddremove
  $ hg perfancestors
  $ hg perfancestorset 2
  $ hg perfannotate a
  $ hg perfbdiff -c 1
  $ hg perfbdiff --alldata 1
  $ hg perfunidiff -c 1
  $ hg perfunidiff --alldata 1
  $ hg perfbookmarks
  $ hg perfbranchmap
  $ hg perfbranchmapload
  $ hg perfbranchmapupdate --base "not tip" --target "tip"
  benchmark of branchmap with 3 revisions with 1 new ones
  $ hg perfcca
  $ hg perfchangegroupchangelog
  $ hg perfchangegroupchangelog --cgversion 01
  $ hg perfchangeset 2
  $ hg perfctxfiles 2
  $ hg perfdiffwd
  $ hg perfdirfoldmap
  $ hg perfdirs
  $ hg perfdirstate
  $ hg perfdirstate --contains
  $ hg perfdirstate --iteration
  $ hg perfdirstatedirs
  $ hg perfdirstatefoldmap
  $ hg perfdirstatewrite
#if repofncache
  $ hg perffncacheencode
  $ hg perffncacheload
  $ hg debugrebuildfncache
  fncache already up to date
  $ hg perffncachewrite
  $ hg debugrebuildfncache
  fncache already up to date
#endif
  $ hg perfheads
  $ hg perfignore
  $ hg perfindex
  $ hg perflinelogedits -n 1
  $ hg perfloadmarkers
  $ hg perflog
  $ hg perflookup 2
  $ hg perflrucache
  $ hg perfmanifest 2
  $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
  $ hg perfmanifest -m 44fe2c8352bb
  abort: manifest revision must be integer or full node
  [255]
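(the abbreviated node above is rejected on purpose: as the message says,
perfmanifest accepts only an integer revision or a full 40-digit node)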
  $ hg perfmergecalculate -r 3
  $ hg perfmoonwalk
  $ hg perfnodelookup 2
  $ hg perfpathcopies 1 2
  $ hg perfprogress --total 1000
  $ hg perfrawfiles 2
  $ hg perfrevlogindex -c
#if reporevlogstore
  $ hg perfrevlogrevisions .hg/store/data/a.i
#endif
  $ hg perfrevlogrevision -m 0
  $ hg perfrevlogchunks -c
  $ hg perfrevrange
  $ hg perfrevset 'all()'
  $ hg perfstartup
  $ hg perfstatus
  $ hg perfstatus --dirstate
  $ hg perftags
  $ hg perftemplating
  $ hg perfvolatilesets
  $ hg perfwalk
  $ hg perfparents
  $ hg perfdiscovery -q .

Test run control
----------------

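(each 'perf.run-limits' entry below is a '<time>-<numberofrun>' pair; the
artificially tiny times make the iteration count the limiting factor, so the
number of runs is deterministic and the test stays fast)
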
Simple single entry

  $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
  ! wall * comb * user * sys * (best of 15) (glob)
  ! wall * comb * user * sys * (max of 15) (glob)
  ! wall * comb * user * sys * (avg of 15) (glob)
  ! wall * comb * user * sys * (median of 15) (glob)

Multiple entries

  $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-50'
  ! wall * comb * user * sys * (best of 50) (glob)
  ! wall * comb * user * sys 0.000000 (max of 50) (glob)
  ! wall * comb * user * sys 0.000000 (avg of 50) (glob)
  ! wall * comb * user * sys 0.000000 (median of 50) (glob)

error cases are ignored

  $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-50'
  malformatted run limit entry, missing "-": 500
  ! wall * comb * user * sys * (best of 50) (glob)
  ! wall * comb * user * sys * (max of 50) (glob)
  ! wall * comb * user * sys * (avg of 50) (glob)
  ! wall * comb * user * sys * (median of 50) (glob)
  $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-120, 0.000000001-50'
  malformatted run limit entry, could not convert string to float: 'aaa': aaa-120
  ! wall * comb * user * sys * (best of 50) (glob)
  ! wall * comb * user * sys * (max of 50) (glob)
  ! wall * comb * user * sys * (avg of 50) (glob)
  ! wall * comb * user * sys * (median of 50) (glob)
  $ hg perfparents --config perf.stub=no --config perf.run-limits='120-aaaaaa, 0.000000001-50'
  malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 120-aaaaaa
  ! wall * comb * user * sys * (best of 50) (glob)
  ! wall * comb * user * sys * (max of 50) (glob)
  ! wall * comb * user * sys * (avg of 50) (glob)
  ! wall * comb * user * sys * (median of 50) (glob)

test actual output
------------------

normal output:

  $ hg perfheads --config perf.stub=no
  ! wall * comb * user * sys * (best of *) (glob)
  ! wall * comb * user * sys * (max of *) (glob)
  ! wall * comb * user * sys * (avg of *) (glob)
  ! wall * comb * user * sys * (median of *) (glob)

detailed output:

  $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
  ! wall * comb * user * sys * (best of *) (glob)
  ! wall * comb * user * sys * (max of *) (glob)
  ! wall * comb * user * sys * (avg of *) (glob)
  ! wall * comb * user * sys * (median of *) (glob)

test json output
----------------

normal output:

  $ hg perfheads --template json --config perf.stub=no
  [
   {
    "avg.comb": *, (glob)
    "avg.count": *, (glob)
    "avg.sys": *, (glob)
    "avg.user": *, (glob)
    "avg.wall": *, (glob)
    "comb": *, (glob)
    "count": *, (glob)
    "max.comb": *, (glob)
    "max.count": *, (glob)
    "max.sys": *, (glob)
    "max.user": *, (glob)
    "max.wall": *, (glob)
    "median.comb": *, (glob)
    "median.count": *, (glob)
    "median.sys": *, (glob)
    "median.user": *, (glob)
    "median.wall": *, (glob)
    "sys": *, (glob)
    "user": *, (glob)
    "wall": * (glob)
   }
  ]

detailed output:

  $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
  [
   {
    "avg.comb": *, (glob)
    "avg.count": *, (glob)
    "avg.sys": *, (glob)
    "avg.user": *, (glob)
    "avg.wall": *, (glob)
    "comb": *, (glob)
    "count": *, (glob)
    "max.comb": *, (glob)
    "max.count": *, (glob)
    "max.sys": *, (glob)
    "max.user": *, (glob)
    "max.wall": *, (glob)
    "median.comb": *, (glob)
    "median.count": *, (glob)
    "median.sys": *, (glob)
    "median.user": *, (glob)
    "median.wall": *, (glob)
    "sys": *, (glob)
    "user": *, (glob)
    "wall": * (glob)
   }
  ]

Test pre-run feature
--------------------

(perf discovery has some spurious output)

  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
  ! wall * comb * user * sys * (best of 1) (glob)
  ! wall * comb * user * sys * (max of 1) (glob)
  ! wall * comb * user * sys * (avg of 1) (glob)
  ! wall * comb * user * sys * (median of 1) (glob)
  searching for changes
  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
  ! wall * comb * user * sys * (best of 1) (glob)
  ! wall * comb * user * sys * (max of 1) (glob)
  ! wall * comb * user * sys * (avg of 1) (glob)
  ! wall * comb * user * sys * (median of 1) (glob)
  searching for changes
  searching for changes
  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
  ! wall * comb * user * sys * (best of 1) (glob)
  ! wall * comb * user * sys * (max of 1) (glob)
  ! wall * comb * user * sys * (avg of 1) (glob)
  ! wall * comb * user * sys * (median of 1) (glob)
  searching for changes
  searching for changes
  searching for changes
  searching for changes
  $ hg perf::bundle 'last(all(), 5)'
  $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
  4 changesets found
  $ hg perf::unbundle last-5.hg
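(the repository only contains 4 changesets at this point, so 'last(all(), 5)'
selects all of them, which the "4 changesets found" line above confirms)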


test profile-benchmark option
------------------------------

Function to check that statprof ran
  $ statprofran () {
  > grep -E 'Sample count:|No samples recorded' > /dev/null
  > }
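(statprofran merely greps the profiler report, so it succeeds whether or not
statprof managed to record samples; all that is checked is that profiling
output was emitted)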
  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran

Check perf.py for historical portability
----------------------------------------

  $ cd "$TESTDIR/.."

  $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
  > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
  > "$TESTDIR"/check-perf-code.py contrib/perf.py
  contrib/perf.py:\d+: (re)
   > from mercurial import (
   import newer module separately in try clause for early Mercurial
  contrib/perf.py:\d+: (re)
   > from mercurial import (
   import newer module separately in try clause for early Mercurial
  contrib/perf.py:\d+: (re)
   > origindexpath = orig.opener.join(indexfile)
   use getvfs()/getsvfs() for early Mercurial
  contrib/perf.py:\d+: (re)
   > origdatapath = orig.opener.join(datafile)
   use getvfs()/getsvfs() for early Mercurial
  contrib/perf.py:\d+: (re)
   > vfs = vfsmod.vfs(tmpdir)
   use getvfs()/getsvfs() for early Mercurial
  contrib/perf.py:\d+: (re)
   > vfs.options = getattr(orig.opener, 'options', None)
   use getvfs()/getsvfs() for early Mercurial
  [1]
@@ -1,116 +1,111 @@
#require no-windows

  $ . "$TESTDIR/remotefilelog-library.sh"

  $ hg init master
  $ cd master
  $ cat >> .hg/hgrc <<EOF
  > [remotefilelog]
  > server=True
  > serverexpiration=-1
  > EOF
  $ echo x > x
  $ hg commit -qAm x
  $ cd ..

  $ hgcloneshallow ssh://user@dummy/master shallow -q
  1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)

# Set the prefetchdays config to zero so that all commits are prefetched
# no matter what their creation date is.
  $ cd shallow
  $ cat >> .hg/hgrc <<EOF
  > [remotefilelog]
  > prefetchdays=0
  > EOF
  $ cd ..

# commit a new version of x so we can gc the old one

  $ cd master
  $ echo y > x
  $ hg commit -qAm y
  $ cd ..

  $ cd shallow
  $ hg pull -q
  $ hg update -q
  1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
  $ cd ..

# gc client cache

  $ lastweek=`"$PYTHON" -c 'import datetime,time; print(datetime.datetime.fromtimestamp(time.time() - (86400 * 7)).strftime("%y%m%d%H%M"))'`
  $ find $CACHEDIR -type f -exec touch -t $lastweek {} \;
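# (the touch above backdates every cached file by one week, making all of
# them old enough to be considered for garbage collection below)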

  $ find $CACHEDIR -type f | sort
  $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 (glob)
  $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob)
  $TESTTMP/hgcache/repos (glob)
  $ hg gc
  finished: removed 1 of 2 files (0.00 GB to 0.00 GB)
  $ find $CACHEDIR -type f | sort
  $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob)
  $TESTTMP/hgcache/repos

# gc server cache

  $ find master/.hg/remotefilelogcache -type f | sort
  master/.hg/remotefilelogcache/x/1406e74118627694268417491f018a4a883152f0 (glob)
  master/.hg/remotefilelogcache/x/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob)
  $ hg gc master
  finished: removed 0 of 1 files (0.00 GB to 0.00 GB)
  $ find master/.hg/remotefilelogcache -type f | sort
  master/.hg/remotefilelogcache/x/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob)

# Test that GC keepset includes pullprefetch revset if it is configured

  $ cd shallow
  $ cat >> .hg/hgrc <<EOF
  > [remotefilelog]
  > pullprefetch=all()
  > EOF
  $ hg prefetch
  1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)

  $ cd ..
  $ hg gc
  finished: removed 0 of 2 files (0.00 GB to 0.00 GB)

# Ensure that there are 2 versions of the file in cache
  $ find $CACHEDIR -type f | sort
  $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 (glob)
  $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob)
  $TESTTMP/hgcache/repos (glob)

# Test that if garbage collection on repack and repack on hg gc flags are set then incremental repack with garbage collector is run

  $ hg gc --config remotefilelog.gcrepack=True --config remotefilelog.repackonhggc=True

# Ensure that loose files are repacked
  $ find $CACHEDIR -type f | sort
  $TESTTMP/hgcache/master/packs/320dab99b7e3f60512b97f347689625263d22cf5.dataidx
  $TESTTMP/hgcache/master/packs/320dab99b7e3f60512b97f347689625263d22cf5.datapack
  $TESTTMP/hgcache/master/packs/837b83c1ef6485a336eb4421ac5973c0ec130fbb.histidx
  $TESTTMP/hgcache/master/packs/837b83c1ef6485a336eb4421ac5973c0ec130fbb.histpack
  $TESTTMP/hgcache/repos

# Test that warning is displayed when there are no valid repos in repofile

  $ cp $CACHEDIR/repos $CACHEDIR/repos.bak
  $ echo " " > $CACHEDIR/repos
  $ hg gc
  warning: no valid repos in repofile
  $ mv $CACHEDIR/repos.bak $CACHEDIR/repos

# Test that warning is displayed when the repo path is malformed

  $ printf "asdas\0das" >> $CACHEDIR/repos
#if py311
  $ hg gc
  finished: removed 0 of 4 files (0.00 GB to 0.00 GB)
#else
  $ hg gc
  abort: invalid path asdas\x00da: .*(null|NULL).* (re)
  [255]
#endif
@@ -1,420 +1,427 @@
#require rhg

  $ NO_FALLBACK="env RHG_ON_UNSUPPORTED=abort"

Unimplemented command
  $ $NO_FALLBACK rhg unimplemented-command
  unsupported feature: error: The subcommand 'unimplemented-command' wasn't recognized

  Usage: rhg [OPTIONS] <COMMAND>

  For more information try '--help'

  [252]
  $ rhg unimplemented-command --config rhg.on-unsupported=abort-silent
  [252]

Finding root
  $ $NO_FALLBACK rhg root
  abort: no repository found in '$TESTTMP' (.hg not found)!
  [255]

  $ hg init repository
  $ cd repository
  $ $NO_FALLBACK rhg root
  $TESTTMP/repository

Reading and setting configuration
  $ echo "[ui]" >> $HGRCPATH
  $ echo "username = user1" >> $HGRCPATH
  $ echo "[extensions]" >> $HGRCPATH
  $ echo "sparse =" >> $HGRCPATH
  $ $NO_FALLBACK rhg config ui.username
  user1
  $ echo "[ui]" >> .hg/hgrc
  $ echo "username = user2" >> .hg/hgrc
  $ $NO_FALLBACK rhg config ui.username
  user2
  $ $NO_FALLBACK rhg --config ui.username=user3 config ui.username
  user3

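(the three lookups above demonstrate the precedence chain: the per-repo
.hg/hgrc overrides the user-level $HGRCPATH, and --config on the command
line overrides both)
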
Unwritable file descriptor
  $ $NO_FALLBACK rhg root > /dev/full
  abort: No space left on device (os error 28)
  [255]

Deleted repository
  $ rm -rf `pwd`
  $ $NO_FALLBACK rhg root
  abort: error getting current working directory: $ENOENT$
  [255]

Listing tracked files
  $ cd $TESTTMP
  $ hg init repository
  $ cd repository
  $ for i in 1 2 3; do
  >   echo $i >> file$i
  >   hg add file$i
  > done
  > hg commit -m "commit $i" -q

Listing tracked files from root
  $ $NO_FALLBACK rhg files
  file1
  file2
  file3

Listing tracked files from subdirectory
  $ mkdir -p path/to/directory
  $ cd path/to/directory
  $ $NO_FALLBACK rhg files
  ../../../file1
  ../../../file2
  ../../../file3

  $ $NO_FALLBACK rhg files --config ui.relative-paths=legacy
  ../../../file1
  ../../../file2
  ../../../file3

  $ $NO_FALLBACK rhg files --config ui.relative-paths=false
  file1
  file2
  file3

  $ $NO_FALLBACK rhg files --config ui.relative-paths=true
  ../../../file1
  ../../../file2
  ../../../file3

Listing tracked files through broken pipe
  $ $NO_FALLBACK rhg files | head -n 1
  ../../../file1

Debugging data in an inline index
  $ cd $TESTTMP
  $ rm -rf repository
  $ hg init repository
  $ cd repository
  $ for i in 1 2 3 4 5 6; do
  >   echo $i >> file-$i
  >   hg add file-$i
  >   hg commit -m "Commit $i" -q
  > done
  $ $NO_FALLBACK rhg debugdata -c 2
  8d0267cb034247ebfa5ee58ce59e22e57a492297
  test
  0 0
  file-3

  Commit 3 (no-eol)
  $ $NO_FALLBACK rhg debugdata -m 2
  file-1\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
  file-2\x005d9299349fc01ddd25d0070d149b124d8f10411e (esc)
  file-3\x002661d26c649684b482d10f91960cc3db683c38b4 (esc)

Debugging with a full node id
  $ $NO_FALLBACK rhg debugdata -c `hg log -r 0 -T '{node}'`
  d1d1c679d3053e8926061b6f45ca52009f011e3f
  test
  0 0
  file-1

  Commit 1 (no-eol)

Specifying revisions by changeset ID
  $ hg log -T '{node}\n'
  c6ad58c44207b6ff8a4fbbca7045a5edaa7e908b
  d654274993d0149eecc3cc03214f598320211900
  f646af7e96481d3a5470b695cf30ad8e3ab6c575
  cf8b83f14ead62b374b6e91a0e9303b85dfd9ed7
  91c6f6e73e39318534dc415ea4e8a09c99cd74d6
  6ae9681c6d30389694d8701faf24b583cf3ccafe
  $ $NO_FALLBACK rhg files -r cf8b83
  file-1
  file-2
  file-3
  $ $NO_FALLBACK rhg cat -r cf8b83 file-2
  2
  $ $NO_FALLBACK rhg cat --rev cf8b83 file-2
  2
  $ $NO_FALLBACK rhg cat -r c file-2
  abort: ambiguous revision identifier: c
  [255]
  $ $NO_FALLBACK rhg cat -r d file-2
  2
  $ $NO_FALLBACK rhg cat -r 0000 file-2
  file-2: no such file in rev 000000000000
  [1]

Cat files
  $ cd $TESTTMP
  $ rm -rf repository
  $ hg init repository
  $ cd repository
  $ echo "original content" > original
  $ hg add original
  $ hg commit -m "add original" original
Without `--rev`
  $ $NO_FALLBACK rhg cat original
  original content
With `--rev`
  $ $NO_FALLBACK rhg cat -r 0 original
  original content
Cat copied file should not display copy metadata
  $ hg copy original copy_of_original
  $ hg commit -m "add copy of original"
  $ $NO_FALLBACK rhg cat original
  original content
  $ $NO_FALLBACK rhg cat -r 1 copy_of_original
  original content


Fallback to Python
  $ $NO_FALLBACK rhg cat original --exclude="*.rs"
  unsupported feature: error: Found argument '--exclude' which wasn't expected, or isn't valid in this context

  If you tried to supply '--exclude' as a value rather than a flag, use '-- --exclude'

  Usage: rhg cat <FILE>...

  For more information try '--help'

  [252]
  $ rhg cat original --exclude="*.rs"
  original content

Check that `fallback-immediately` overrides `$NO_FALLBACK`
  $ $NO_FALLBACK rhg cat original --exclude="*.rs" --config rhg.fallback-immediately=1
  original content

  $ (unset RHG_FALLBACK_EXECUTABLE; rhg cat original --exclude="*.rs")
  abort: 'rhg.on-unsupported=fallback' without 'rhg.fallback-executable' set.
  [255]

  $ (unset RHG_FALLBACK_EXECUTABLE; rhg cat original)
  original content

  $ rhg cat original --exclude="*.rs" --config rhg.fallback-executable=false
  [1]

  $ rhg cat original --exclude="*.rs" --config rhg.fallback-executable=hg-non-existent
  abort: invalid fallback 'hg-non-existent': cannot find binary path
  [253]

  $ rhg cat original --exclude="*.rs" --config rhg.fallback-executable=rhg
  Blocking recursive fallback. The 'rhg.fallback-executable = rhg' config points to `rhg` itself.
  unsupported feature: error: Found argument '--exclude' which wasn't expected, or isn't valid in this context

  If you tried to supply '--exclude' as a value rather than a flag, use '-- --exclude'

  Usage: rhg cat <FILE>...

  For more information try '--help'

  [252]

Fallback with shell path segments
  $ $NO_FALLBACK rhg cat .
  unsupported feature: `..` or `.` path segment
  [252]
  $ $NO_FALLBACK rhg cat ..
  unsupported feature: `..` or `.` path segment
  [252]
  $ $NO_FALLBACK rhg cat ../..
  unsupported feature: `..` or `.` path segment
  [252]

Fallback with filesets
  $ $NO_FALLBACK rhg cat "set:c or b"
  unsupported feature: fileset
  [252]

Fallback with generic hooks
  $ $NO_FALLBACK rhg cat original --config hooks.pre-cat=something
  unsupported feature: pre-cat hook defined
  [252]

  $ $NO_FALLBACK rhg cat original --config hooks.post-cat=something
  unsupported feature: post-cat hook defined
  [252]

  $ $NO_FALLBACK rhg cat original --config hooks.fail-cat=something
  unsupported feature: fail-cat hook defined
  [252]

Fallback with [defaults]
  $ $NO_FALLBACK rhg cat original --config "defaults.cat=-r null"
  unsupported feature: `defaults` config set
  [252]


Requirements
  $ $NO_FALLBACK rhg debugrequirements
  dotencode
  fncache
  generaldelta
  persistent-nodemap
  revlog-compression-zstd (zstd !)
  revlogv1
  share-safe
  sparserevlog
  store

  $ echo indoor-pool >> .hg/requires
  $ $NO_FALLBACK rhg files
  unsupported feature: repository requires feature unknown to this Mercurial: indoor-pool
  [252]

  $ $NO_FALLBACK rhg cat -r 1 copy_of_original
  unsupported feature: repository requires feature unknown to this Mercurial: indoor-pool
  [252]

  $ $NO_FALLBACK rhg debugrequirements
  unsupported feature: repository requires feature unknown to this Mercurial: indoor-pool
  [252]

  $ echo -e '\xFF' >> .hg/requires
  $ $NO_FALLBACK rhg debugrequirements
  abort: parse error in 'requires' file
  [255]

Persistent nodemap
  $ cd $TESTTMP
  $ rm -rf repository
  $ hg --config format.use-persistent-nodemap=no init repository
  $ cd repository
  $ $NO_FALLBACK rhg debugrequirements | grep nodemap
  [1]
  $ hg debugbuilddag .+5000 --overwritten-file --config "storage.revlog.nodemap.mode=warn"
  $ hg id -r tip
  c3ae8dec9fad tip
  $ ls .hg/store/00changelog*
  .hg/store/00changelog.d
  .hg/store/00changelog.i
  $ $NO_FALLBACK rhg files -r c3ae8dec9fad
  of

  $ cd $TESTTMP
  $ rm -rf repository
  $ hg --config format.use-persistent-nodemap=True init repository
  $ cd repository
  $ $NO_FALLBACK rhg debugrequirements | grep nodemap
  persistent-nodemap
  $ hg debugbuilddag .+5000 --overwritten-file --config "storage.revlog.nodemap.mode=warn"
  $ hg id -r tip
  c3ae8dec9fad tip
  $ ls .hg/store/00changelog*
  .hg/store/00changelog-*.nd (glob)
  .hg/store/00changelog.d
  .hg/store/00changelog.i
  .hg/store/00changelog.n

Rhg status on a sparse repo with nodemap (this specific combination used to crash in 6.5.2)

  $ hg debugsparse -X excluded-dir
  $ $NO_FALLBACK rhg status

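(presumably any sparse configuration triggers the crashing code path; the
single exclusion rule added by 'hg debugsparse -X' is enough to mark the
repository as sparse)
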
312 Specifying revisions by changeset ID
319 Specifying revisions by changeset ID
313 $ $NO_FALLBACK rhg files -r c3ae8dec9fad
320 $ $NO_FALLBACK rhg files -r c3ae8dec9fad
314 of
321 of
315 $ $NO_FALLBACK rhg cat -r c3ae8dec9fad of
322 $ $NO_FALLBACK rhg cat -r c3ae8dec9fad of
316 r5000
323 r5000
317
324
318 Crate a shared repository
325 Crate a shared repository
319
326
  $ echo "[extensions]" >> $HGRCPATH
  $ echo "share = " >> $HGRCPATH

  $ cd $TESTTMP
  $ hg init repo1
  $ echo a > repo1/a
  $ hg -R repo1 commit -A -m'init'
  adding a

  $ hg share repo1 repo2
  updating working directory
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

And check that basic rhg commands work with sharing

  $ $NO_FALLBACK rhg files -R repo2
  repo2/a
  $ $NO_FALLBACK rhg -R repo2 cat -r 0 repo2/a
  a

Same with relative sharing

  $ hg share repo2 repo3 --relative
  updating working directory
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ $NO_FALLBACK rhg files -R repo3
  repo3/a
  $ $NO_FALLBACK rhg -R repo3 cat -r 0 repo3/a
  a

Same with share-safe

  $ echo "[format]" >> $HGRCPATH
  $ echo "use-share-safe = True" >> $HGRCPATH

  $ cd $TESTTMP
  $ hg init repo4
  $ cd repo4
  $ echo a > a
  $ hg commit -A -m'init'
  adding a

  $ cd ..
  $ hg share repo4 repo5
  updating working directory
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

And check that basic rhg commands work with sharing

  $ cd repo5
  $ $NO_FALLBACK rhg files
  a
  $ $NO_FALLBACK rhg cat -r 0 a
  a

The blackbox extension is supported

  $ echo "[extensions]" >> $HGRCPATH
  $ echo "blackbox =" >> $HGRCPATH
  $ echo "[blackbox]" >> $HGRCPATH
  $ echo "maxsize = 1" >> $HGRCPATH
  $ $NO_FALLBACK rhg files > /dev/null
  $ cat .hg/blackbox.log
  ????-??-?? ??:??:??.??? * @d3873e73d99ef67873dac33fbcc66268d5d2b6f4 (*)> (rust) files exited 0 after * seconds (glob)
  $ cat .hg/blackbox.log.1
  ????-??-?? ??:??:??.??? * @d3873e73d99ef67873dac33fbcc66268d5d2b6f4 (*)> (rust) files (glob)

Subrepos are not supported

  $ touch .hgsub
  $ $NO_FALLBACK rhg files
  unsupported feature: subrepos (.hgsub is present)
  [252]
  $ rhg files
  a
  $ rm .hgsub

The `:required` extension suboptions are correctly ignored

  $ echo "[extensions]" >> $HGRCPATH
  $ echo "blackbox:required = yes" >> $HGRCPATH
  $ rhg files
  a
  $ echo "*:required = yes" >> $HGRCPATH
  $ rhg files
  a

We can ignore all extensions at once

  $ echo "[extensions]" >> $HGRCPATH
  $ echo "thisextensionbetternotexist=" >> $HGRCPATH
  $ echo "thisextensionbetternotexisteither=" >> $HGRCPATH
  $ $NO_FALLBACK rhg files
  unsupported feature: extensions: thisextensionbetternotexist, thisextensionbetternotexisteither (consider adding them to 'rhg.ignored-extensions' config)
  [252]

  $ echo "[rhg]" >> $HGRCPATH
  $ echo "ignored-extensions=*" >> $HGRCPATH
  $ $NO_FALLBACK rhg files
  a
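
The wildcard form above turns off the fallback check for every unknown extension at once. As a narrower sketch (assuming, as the error message suggests, that `rhg.ignored-extensions` also accepts a comma-separated list), only the known-irrelevant extensions could be listed:

  $ echo "[rhg]" >> $HGRCPATH
  $ echo "ignored-extensions = thisextensionbetternotexist, thisextensionbetternotexisteither" >> $HGRCPATH

after which the `rhg files` call above should succeed without falling back to Python.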
@@ -1,655 +1,655 b''
setup

  $ cat >> $HGRCPATH <<EOF
  > [extensions]
  > share =
  > [format]
  > use-share-safe = True
  > [storage]
  > revlog.persistent-nodemap.slow-path=allow
  > # enforce zlib to ensure we can upgrade to zstd later
  > [format]
  > revlog-compression=zlib
  > # we want to be able to enable it later
  > use-persistent-nodemap=no
  > EOF

prepare source repo

  $ hg init source
  $ cd source
  $ cat .hg/requires
  dirstate-v2 (dirstate-v2 !)
  share-safe
  $ cat .hg/store/requires
  dotencode
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store
  $ hg debugrequirements
  dotencode
  dirstate-v2 (dirstate-v2 !)
  fncache
  generaldelta
  revlogv1
  share-safe
  sparserevlog
  store

  $ echo a > a
  $ hg ci -Aqm "added a"
  $ echo b > b
  $ hg ci -Aqm "added b"

  $ HGEDITOR=cat hg config --shared
  abort: repository is not shared; can't use --shared
  [10]
  $ cd ..

Create a shared repo and check the requirements are shared and read correctly
  $ hg share source shared1
  updating working directory
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd shared1
  $ cat .hg/requires
  dirstate-v2 (dirstate-v2 !)
  share-safe
  shared

  $ hg debugrequirements -R ../source
  dotencode
  dirstate-v2 (dirstate-v2 !)
  fncache
  generaldelta
  revlogv1
  share-safe
  sparserevlog
  store

  $ hg debugrequirements
  dotencode
  dirstate-v2 (dirstate-v2 !)
  fncache
  generaldelta
  revlogv1
  share-safe
  shared
  sparserevlog
  store

  $ echo c > c
  $ hg ci -Aqm "added c"

Check that the config of the source repository is also loaded

  $ hg showconfig ui.curses
  [1]

  $ echo "[ui]" >> ../source/.hg/hgrc
  $ echo "curses=true" >> ../source/.hg/hgrc

  $ hg showconfig ui.curses
  true

Test that extensions of the source repository are also loaded

  $ hg debugextensions
  share
  $ hg extdiff -p echo
  hg: unknown command 'extdiff'
  'extdiff' is provided by the following extension:

  extdiff command to allow external programs to compare revisions

  (use 'hg help extensions' for information on enabling extensions)
  [10]

  $ echo "[extensions]" >> ../source/.hg/hgrc
  $ echo "extdiff=" >> ../source/.hg/hgrc

  $ hg debugextensions -R ../source
  extdiff
  share
  $ hg extdiff -R ../source -p echo

BROKEN: the command below will not work if the config of the shared source is
not loaded on dispatch, but debugextensions says that the extension is loaded
  $ hg debugextensions
  extdiff
  share

  $ hg extdiff -p echo

However, the local .hg/hgrc should override the config set by the share source

  $ echo "[ui]" >> .hg/hgrc
  $ echo "curses=false" >> .hg/hgrc

  $ hg showconfig ui.curses
  false

  $ HGEDITOR=cat hg config --shared
  [ui]
  curses=true
  [extensions]
  extdiff=

  $ HGEDITOR=cat hg config --local
  [ui]
  curses=false

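When the shared and local configs disagree, the origin of the winning value can be checked directly; `hg config --debug` annotates each item with the file and line that set it. A quick sketch (output elided):

  $ hg config ui.curses --debug

Here it should point at the local `.hg/hgrc`, confirming that the share source's `curses=true` was overridden.
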
Testing that hooks set in the source repository also run in the shared repo

  $ cd ../source
  $ cat <<EOF >> .hg/hgrc
  > [extensions]
  > hooklib=
  > [hooks]
  > pretxnchangegroup.reject_merge_commits = \
  >   python:hgext.hooklib.reject_merge_commits.hook
  > EOF

  $ cd ..
  $ hg clone source cloned
  updating to branch default
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd cloned
  $ hg up 0
  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
  $ echo bar > bar
  $ hg ci -Aqm "added bar"
  $ hg merge
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)
  $ hg ci -m "merge commit"

  $ hg push ../source
  pushing to ../source
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  transaction abort!
  rollback completed
  abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  [255]

  $ hg push ../shared1
  pushing to ../shared1
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  transaction abort!
  rollback completed
  abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  [255]

Test that if the share source config is untrusted, we don't read it

  $ cd ../shared1

  $ cat << EOF > $TESTTMP/untrusted.py
  > from mercurial import scmutil, util
  > def uisetup(ui):
  >     class untrustedui(ui.__class__):
  >         def _trusted(self, fp, f):
  >             if util.normpath(fp.name).endswith(b'source/.hg/hgrc'):
  >                 return False
  >             return super(untrustedui, self)._trusted(fp, f)
  >     ui.__class__ = untrustedui
  > EOF

  $ hg showconfig hooks
  hooks.pretxnchangegroup.reject_merge_commits=python:hgext.hooklib.reject_merge_commits.hook

  $ hg showconfig hooks --config extensions.untrusted=$TESTTMP/untrusted.py
  [1]

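Outside of a test, the untrusted case is normally resolved by trusting the owner of the share source rather than patching the ui class; a minimal sketch (the user and group names are illustrative):

  $ cat >> $HGRCPATH <<EOF
  > [trusted]
  > users = hgadmin
  > groups = hgdev
  > EOF

Config files owned by a trusted user or group are then loaded instead of being silently ignored.
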
Update the source repository format and check that the shared repo works

  $ cd ../source

Disable zstd related tests because it is not present in the pure version
#if zstd
  $ echo "[format]" >> .hg/hgrc
  $ echo "revlog-compression=zstd" >> .hg/hgrc

  $ hg debugupgraderepo --run -q
  upgrade will perform the following actions:

  requirements
  preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-dirstate-v2 !)
  preserved: dotencode, use-dirstate-v2, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (dirstate-v2 !)
  added: revlog-compression-zstd

  processed revlogs:
  - all-filelogs
  - changelog
  - manifest

  $ hg log -r .
  changeset: 1:5f6d8a4bf34a
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: added b

#endif
  $ echo "[format]" >> .hg/hgrc
  $ echo "use-persistent-nodemap=True" >> .hg/hgrc

  $ hg debugupgraderepo --run -q -R ../shared1
  abort: cannot use these actions on a share repository: persistent-nodemap
  (upgrade the main repository directly)
  [255]

  $ hg debugupgraderepo --run -q
  upgrade will perform the following actions:

  requirements
  preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
  preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
  preserved: dotencode, use-dirstate-v2, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd dirstate-v2 !)
  preserved: dotencode, use-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
  added: persistent-nodemap

  processed revlogs:
  - all-filelogs
  - changelog
  - manifest

  $ hg log -r .
  changeset: 1:5f6d8a4bf34a
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: added b


The shared one should work
  $ cd ../shared1
  $ hg log -r .
  changeset: 2:155349b645be
  tag: tip
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: added c


Testing that nonsharedrc is loaded for the source and not the share

  $ cd ../source
  $ touch .hg/hgrc-not-shared
  $ echo "[ui]" >> .hg/hgrc-not-shared
  $ echo "traceback=true" >> .hg/hgrc-not-shared

  $ hg showconfig ui.traceback
  true

  $ HGEDITOR=cat hg config --non-shared
  [ui]
  traceback=true

  $ cd ../shared1
  $ hg showconfig ui.traceback
  [1]

Unsharing works

  $ hg unshare

Test that the source config is added to the shared one after unshare, and that
the config of the current repo is still respected over the config which came
from the source
  $ cd ../cloned
  $ hg push ../shared1
  pushing to ../shared1
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  transaction abort!
  rollback completed
  abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  [255]
  $ hg showconfig ui.curses -R ../shared1
  false

  $ cd ../

Test that upgrading using debugupgraderepo works
=================================================

  $ hg init non-share-safe --config format.use-share-safe=false
  $ cd non-share-safe
  $ hg debugrequirements
  dotencode
  dirstate-v2 (dirstate-v2 !)
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store
  $ echo foo > foo
  $ hg ci -Aqm 'added foo'
  $ echo bar > bar
  $ hg ci -Aqm 'added bar'

Create a share before upgrading

  $ cd ..
  $ hg share non-share-safe nss-share
  updating working directory
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg debugrequirements -R nss-share
  dotencode
  dirstate-v2 (dirstate-v2 !)
  fncache
  generaldelta
  revlogv1
  shared
  sparserevlog
  store
  $ cd non-share-safe

Upgrade

  $ hg debugupgraderepo -q
  requirements
  preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
  preserved: dotencode, use-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
  added: share-safe

  no revlogs to process

  $ hg debugupgraderepo --run
  upgrade will perform the following actions:

  requirements
  preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
  preserved: dotencode, use-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
  added: share-safe

  share-safe
  Upgrades a repository to share-safe format so that future shares of this repository share its requirements and configs.

  no revlogs to process

  beginning upgrade...
  repository locked and read-only
  creating temporary repository to stage upgraded data: $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
  (it is safe to interrupt this process any time before data migration completes)
  upgrading repository requirements
  removing temporary repository $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
  repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.

  $ hg debugrequirements
  dotencode
  dirstate-v2 (dirstate-v2 !)
  fncache
  generaldelta
  revlogv1
  share-safe
  sparserevlog
  store

  $ cat .hg/requires
  dirstate-v2 (dirstate-v2 !)
  share-safe

  $ cat .hg/store/requires
  dotencode
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ hg log -GT "{node}: {desc}\n"
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


Make sure existing shares don't work with the default config

  $ hg log -GT "{node}: {desc}\n" -R ../nss-share
  abort: version mismatch: source uses share-safe functionality while the current share does not
  (see `hg help config.format.use-share-safe` for more information)
  [255]


Create a safe share from the upgraded one

  $ cd ..
  $ hg share non-share-safe ss-share
  updating working directory
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd ss-share
  $ hg log -GT "{node}: {desc}\n"
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo

  $ cd ../non-share-safe

Test that downgrading works too

  $ cat >> $HGRCPATH <<EOF
  > [extensions]
  > share =
  > [format]
  > use-share-safe = False
  > EOF

  $ hg debugupgraderepo -q
  requirements
  preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
  preserved: dotencode, use-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
  removed: share-safe

  no revlogs to process

  $ hg debugupgraderepo --run
  upgrade will perform the following actions:

  requirements
  preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
  preserved: dotencode, use-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
  removed: share-safe

  no revlogs to process

  beginning upgrade...
  repository locked and read-only
  creating temporary repository to stage upgraded data: $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
  (it is safe to interrupt this process any time before data migration completes)
  upgrading repository requirements
  removing temporary repository $TESTTMP/non-share-safe/.hg/upgrade.* (glob)
-  repository downgraded to not use share safe mode, existing shares will not work and needs to be reshared.
+  repository downgraded to not use share safe mode, existing shares will not work and need to be reshared.

  $ hg debugrequirements
  dotencode
  dirstate-v2 (dirstate-v2 !)
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ cat .hg/requires
  dotencode
  dirstate-v2 (dirstate-v2 !)
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ test -f .hg/store/requires
  [1]

  $ hg log -GT "{node}: {desc}\n"
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


Make sure existing shares still work

  $ hg log -GT "{node}: {desc}\n" -R ../nss-share
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


  $ hg log -GT "{node}: {desc}\n" -R ../ss-share
  abort: share source does not support share-safe requirement
  (see `hg help config.format.use-share-safe` for more information)
  [255]

Testing automatic downgrade of shares when config is set

  $ touch ../ss-share/.hg/wlock
  $ hg log -GT "{node}: {desc}\n" -R ../ss-share --config share.safe-mismatch.source-not-safe=downgrade-abort
  abort: failed to downgrade share, got error: Lock held
  (see `hg help config.format.use-share-safe` for more information)
  [255]
  $ rm ../ss-share/.hg/wlock

  $ cp -R ../ss-share ../ss-share-bck
  $ hg log -GT "{node}: {desc}\n" -R ../ss-share --config share.safe-mismatch.source-not-safe=downgrade-abort
  repository downgraded to not use share-safe mode
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo

  $ rm -rf ../ss-share
  $ mv ../ss-share-bck ../ss-share

  $ hg log -GT "{node}: {desc}\n" -R ../ss-share --config share.safe-mismatch.source-not-safe=downgrade-abort --config share.safe-mismatch.source-not-safe:verbose-upgrade=no
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


  $ hg log -GT "{node}: {desc}\n" -R ../ss-share
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo



Testing automatic upgrade of shares when config is set

  $ hg debugupgraderepo -q --run --config format.use-share-safe=True
  upgrade will perform the following actions:

  requirements
  preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-dirstate-v2 !)
  preserved: dotencode, use-dirstate-v2, fncache, generaldelta, revlogv1, sparserevlog, store (dirstate-v2 !)
  added: share-safe

  no revlogs to process

  repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
  $ hg debugrequirements
  dotencode
  dirstate-v2 (dirstate-v2 !)
  fncache
  generaldelta
  revlogv1
  share-safe
  sparserevlog
  store
  $ hg log -GT "{node}: {desc}\n" -R ../nss-share
  abort: version mismatch: source uses share-safe functionality while the current share does not
  (see `hg help config.format.use-share-safe` for more information)
  [255]

Check that if the lock is taken, the upgrade fails but read operations are successful
  $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config share.safe-mismatch.source-safe=upgra
  abort: share-safe mismatch with source.
  Unrecognized value 'upgra' of `share.safe-mismatch.source-safe` set.
  (see `hg help config.format.use-share-safe` for more information)
  [255]
  $ touch ../nss-share/.hg/wlock
  $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config share.safe-mismatch.source-safe=upgrade-allow
  failed to upgrade share, got error: Lock held
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


  $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config share.safe-mismatch.source-safe=upgrade-allow --config share.safe-mismatch.source-safe.warn=False
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


  $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config share.safe-mismatch.source-safe=upgrade-abort
  abort: failed to upgrade share, got error: Lock held
  (see `hg help config.format.use-share-safe` for more information)
  [255]

  $ rm ../nss-share/.hg/wlock
  $ cp -R ../nss-share ../nss-share-bck
  $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config share.safe-mismatch.source-safe=upgrade-abort
  repository upgraded to use share-safe mode
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo

  $ rm -rf ../nss-share
  $ mv ../nss-share-bck ../nss-share
  $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config share.safe-mismatch.source-safe=upgrade-abort --config share.safe-mismatch.source-safe:verbose-upgrade=no
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo

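The mismatch handling exercised above all hangs off one family of config knobs; a sketch of making them permanent instead of passing `--config` on each command (assuming the options behave identically when set in a config file):

  $ cat >> $HGRCPATH <<EOF
  > [share]
  > safe-mismatch.source-not-safe = downgrade-abort
  > safe-mismatch.source-not-safe:verbose-upgrade = no
  > safe-mismatch.source-safe = upgrade-abort
  > safe-mismatch.source-safe:verbose-upgrade = no
  > EOF

`upgrade-allow`, seen earlier, is the lenient variant: it warns when the upgrade fails (for example on a held lock) and continues instead of aborting.
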
Test that unshare works

  $ hg unshare -R ../nss-share
  $ hg log -GT "{node}: {desc}\n" -R ../nss-share
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


Test automatic upgrade/downgrade of the main repository
------------------------------------------------------

create an initial repository

  $ hg init auto-upgrade \
  > --config format.use-share-safe=no
  $ hg debugbuilddag -R auto-upgrade --new-file .+5
  $ hg -R auto-upgrade update
  6 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg debugformat -R auto-upgrade | grep share-safe
  share-safe: no

upgrade it to share-safe automatically

  $ hg status -R auto-upgrade \
  > --config format.use-share-safe.automatic-upgrade-of-mismatching-repositories=yes \
  > --config format.use-share-safe=yes
  automatically upgrading repository to the `share-safe` feature
  (see `hg help config.format.use-share-safe` for details)
  $ hg debugformat -R auto-upgrade | grep share-safe
  share-safe: yes

downgrade it from share-safe automatically

  $ hg status -R auto-upgrade \
  > --config format.use-share-safe.automatic-upgrade-of-mismatching-repositories=yes \
  > --config format.use-share-safe=no
  automatically downgrading repository from the `share-safe` feature
  (see `hg help config.format.use-share-safe` for details)
  $ hg debugformat -R auto-upgrade | grep share-safe
  share-safe: no
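
The per-command overrides above can likewise live in a config file; a minimal sketch mirroring the upgrade run:

  $ cat >> $HGRCPATH <<EOF
  > [format]
  > use-share-safe = yes
  > use-share-safe.automatic-upgrade-of-mismatching-repositories = yes
  > EOF

Any repository touched by a later command is then brought in line with the configured format, printing the "automatically upgrading" note seen above.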
@@ -1,283 +1,300 b''
#require no-reposimplestore

  $ hg clone http://localhost:$HGPORT/ copy
  abort: * (glob)
  [100]
  $ test -d copy
  [1]

This server doesn't do range requests so it's basically only good for
one pull

  $ "$PYTHON" "$TESTDIR/dumbhttp.py" -p $HGPORT --pid dumb.pid \
  > --logfile server.log
  $ cat dumb.pid >> $DAEMON_PIDS
  $ hg init remote
  $ cd remote
  $ echo foo > bar
  $ echo c2 > '.dotfile with spaces'
  $ hg add
  adding .dotfile with spaces
  adding bar
  $ hg commit -m"test"
  $ hg tip
  changeset: 0:02770d679fb8
  tag: tip
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: test

  $ cd ..
  $ hg clone static-http://localhost:$HGPORT/remote local
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 2 changes to 2 files
  new changesets 02770d679fb8
  updating to branch default
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd local
  $ hg verify -q
  $ cat bar
  foo
  $ cd ../remote
  $ echo baz > quux
  $ hg commit -A -mtest2
  adding quux

check for HTTP opener failures when cachefile does not exist

  $ rm .hg/cache/*
  $ cd ../local
  $ cat >> .hg/hgrc <<EOF
  > [hooks]
  > changegroup = sh -c "printenv.py --line changegroup"
  > EOF
  $ hg pull
  pulling from static-http://localhost:$HGPORT/remote
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  new changesets 4ac2e3648604
  changegroup hook: HG_HOOKNAME=changegroup
  HG_HOOKTYPE=changegroup
  HG_NODE=4ac2e3648604439c580c69b09ec9d93a88d93432
  HG_NODE_LAST=4ac2e3648604439c580c69b09ec9d93a88d93432
  HG_SOURCE=pull
  HG_TXNID=TXN:$ID$
  HG_TXNNAME=pull
  http://localhost:$HGPORT/remote
  HG_URL=http://localhost:$HGPORT/remote

  (run 'hg update' to get a working copy)

trying to push

  $ hg update
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ echo more foo >> bar
  $ hg commit -m"test"
  $ hg push
  pushing to static-http://localhost:$HGPORT/remote
  abort: destination does not support push
  [255]

trying clone -r

  $ cd ..
  $ hg clone -r doesnotexist static-http://localhost:$HGPORT/remote local0
  abort: unknown revision 'doesnotexist'
  [10]
  $ hg clone -r 0 static-http://localhost:$HGPORT/remote local0
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 2 changes to 2 files
  new changesets 02770d679fb8
  updating to branch default
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved

test with "/" URI (issue747) and subrepo

  $ hg init
  $ hg init sub
  $ touch sub/test
  $ hg -R sub commit -A -m "test"
  adding test
  $ hg -R sub tag not-empty
  $ echo sub=sub > .hgsub
  $ echo a > a
  $ hg add a .hgsub
  $ hg -q ci -ma
  $ hg clone static-http://localhost:$HGPORT/ local2
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 3 changes to 3 files
  new changesets a9ebfbe8e587
  updating to branch default
  cloning subrepo sub from static-http://localhost:$HGPORT/sub
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 2 files
  new changesets be090ea66256:322ea90975df
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd local2
  $ hg verify -q
  $ cat a
  a
  $ hg paths
  default = static-http://localhost:$HGPORT/

test with empty repo (issue965)

  $ cd ..
  $ hg init remotempty
  $ hg clone static-http://localhost:$HGPORT/remotempty local3
  no changes found
  updating to branch default
  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd local3
  $ hg verify -q
  $ hg paths
  default = static-http://localhost:$HGPORT/remotempty

+test autodetecting static-http: scheme (issue6833)
+
+  $ cd ..
+  $ hg init actually-static
+  $ hg clone http://localhost:$HGPORT/actually-static local4
+  no changes found
+  updating to branch default
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
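The added test appears to work because the client first probes the smart command protocol and only then falls back to plain file access: the request log at the end of this file shows both the `?cmd=capabilities` probe against `/actually-static` and the subsequent static file fetches. An explicit-scheme equivalent of the added clone, for comparison (the `local5` name is illustrative):

  $ hg clone static-http://localhost:$HGPORT/actually-static local5
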
test with non-repo

-  $ cd ..
  $ mkdir notarepo
  $ hg clone static-http://localhost:$HGPORT/notarepo local3
  abort: 'http://localhost:$HGPORT/notarepo' does not appear to be an hg repository
  [255]

Clone with tags and branches works

  $ hg init remote-with-names
  $ cd remote-with-names
  $ echo 0 > foo
  $ hg -q commit -A -m initial
  $ echo 1 > foo
  $ hg commit -m 'commit 1'
  $ hg -q up 0
  $ hg branch mybranch
  marked working directory as branch mybranch
  (branches are permanent and global, did you want a bookmark?)
  $ echo 2 > foo
  $ hg commit -m 'commit 2 (mybranch)'
  $ hg tag -r 1 'default-tag'
  $ hg tag -r 2 'branch-tag'

  $ cd ..

  $ hg clone static-http://localhost:$HGPORT/remote-with-names local-with-names
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 5 changesets with 5 changes to 2 files (+1 heads)
  new changesets 68986213bd44:0c325bd2b5a7
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

Clone a specific branch works

  $ hg clone -r mybranch static-http://localhost:$HGPORT/remote-with-names local-with-names-branch
  adding changesets
  adding manifests
  adding file changes
  added 4 changesets with 4 changes to 2 files
  new changesets 68986213bd44:0c325bd2b5a7
  updating to branch mybranch
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved

Clone a specific tag works

  $ hg clone -r default-tag static-http://localhost:$HGPORT/remote-with-names local-with-names-tag
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 1 files
  new changesets 68986213bd44:4ee3fcef1c80
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ killdaemons.py

List of files accessed over HTTP:

  $ cat server.log | sed -n -e 's|.*GET \(/[^ ]*\).*|\1|p' | sort -u
  /.hg/bookmarks
  /.hg/bookmarks.current
  /.hg/cache/hgtagsfnodes1
  /.hg/dirstate
  /.hg/requires
  /.hg/store/00changelog.i
  /.hg/store/00manifest.i
  /.hg/store/data/%7E2ehgsub.i (no-py37 !)
  /.hg/store/data/%7E2ehgsubstate.i (no-py37 !)
  /.hg/store/data/a.i
  /.hg/store/data/~2ehgsub.i (py37 !)
  /.hg/store/data/~2ehgsubstate.i (py37 !)
  /.hg/store/requires
+  /actually-static/.hg/bookmarks
+  /actually-static/.hg/bookmarks.current
+  /actually-static/.hg/dirstate
+  /actually-static/.hg/requires
+  /actually-static/.hg/store/00changelog.i
+  /actually-static/.hg/store/00manifest.i
+  /actually-static/.hg/store/requires
+  /actually-static/?cmd=capabilities
+  /actually-static?cmd=capabilities
  /notarepo/.hg/00changelog.i
  /notarepo/.hg/requires
  /remote-with-names/.hg/bookmarks
  /remote-with-names/.hg/bookmarks.current
  /remote-with-names/.hg/cache/branch2-served
  /remote-with-names/.hg/cache/hgtagsfnodes1
  /remote-with-names/.hg/cache/tags2-served
  /remote-with-names/.hg/dirstate
  /remote-with-names/.hg/localtags
  /remote-with-names/.hg/requires
  /remote-with-names/.hg/store/00changelog.i
  /remote-with-names/.hg/store/00manifest.i
  /remote-with-names/.hg/store/data/%7E2ehgtags.i (no-py37 !)
  /remote-with-names/.hg/store/data/foo.i
  /remote-with-names/.hg/store/data/~2ehgtags.i (py37 !)
  /remote-with-names/.hg/store/obsstore
  /remote-with-names/.hg/store/requires
  /remote/.hg/bookmarks
  /remote/.hg/bookmarks.current
  /remote/.hg/cache/branch2-base
  /remote/.hg/cache/branch2-immutable
  /remote/.hg/cache/branch2-served
  /remote/.hg/cache/hgtagsfnodes1
  /remote/.hg/cache/rbc-names-v1
  /remote/.hg/cache/tags2-served
  /remote/.hg/dirstate
  /remote/.hg/localtags
  /remote/.hg/requires
  /remote/.hg/store/00changelog.i
  /remote/.hg/store/00manifest.i
  /remote/.hg/store/data/%7E2edotfile%20with%20spaces.i (no-py37 !)
  /remote/.hg/store/data/%7E2ehgtags.i (no-py37 !)
  /remote/.hg/store/data/bar.i
  /remote/.hg/store/data/quux.i
  /remote/.hg/store/data/~2edotfile%20with%20spaces.i (py37 !)
  /remote/.hg/store/data/~2ehgtags.i (py37 !)
  /remote/.hg/store/obsstore
  /remote/.hg/store/requires
  /remotempty/.hg/bookmarks
  /remotempty/.hg/bookmarks.current
  /remotempty/.hg/dirstate
  /remotempty/.hg/requires
  /remotempty/.hg/store/00changelog.i
  /remotempty/.hg/store/00manifest.i
  /remotempty/.hg/store/requires
  /sub/.hg/bookmarks
  /sub/.hg/bookmarks.current
  /sub/.hg/cache/hgtagsfnodes1
  /sub/.hg/dirstate
  /sub/.hg/requires
  /sub/.hg/store/00changelog.i
  /sub/.hg/store/00manifest.i
  /sub/.hg/store/data/%7E2ehgtags.i (no-py37 !)
  /sub/.hg/store/data/test.i
  /sub/.hg/store/data/~2ehgtags.i (py37 !)
  /sub/.hg/store/requires