branching: merge with stable
marmoute, r51579:a41eeb87 (merge, branch: default)
@@ -0,0 +1,51 @@
+  $ cat >> $HGRCPATH << EOF
+  > [storage]
+  > dirstate-v2.slow-path=allow
+  > [format]
+  > use-dirstate-v2=no
+  > EOF
+
+Set up a v1 repo
+
+  $ hg init repo
+  $ cd repo
+  $ echo a > a
+  $ hg add a
+  $ hg commit -m a
+  $ hg debugrequires | grep dirstate
+  [1]
+  $ ls -1 .hg/dirstate*
+  .hg/dirstate
+
+Copy v1 dirstate
+  $ cp .hg/dirstate $TESTTMP/dirstate-v1-backup
+
+Upgrade it to v2
+
+  $ hg debugupgraderepo -q --config format.use-dirstate-v2=1 --run | egrep 'added:|removed:'
+  added: dirstate-v2
+  $ hg debugrequires | grep dirstate
+  dirstate-v2
+  $ ls -1 .hg/dirstate*
+  .hg/dirstate
+  .hg/dirstate.* (glob)
+
+Manually reset to dirstate v1 to simulate an incomplete dirstate-v2 upgrade
+
+  $ rm .hg/dirstate*
+  $ cp $TESTTMP/dirstate-v1-backup .hg/dirstate
+
+There should be no errors, but a v2 dirstate should be written back to disk
+  $ hg st
+  $ ls -1 .hg/dirstate*
+  .hg/dirstate
+  .hg/dirstate.* (glob)
+
+Corrupt the dirstate to see how the errors show up to the user
+  $ echo "I ate your data" > .hg/dirstate
+
+  $ hg st
+  abort: working directory state appears damaged! (no-rhg !)
+  (falling back to dirstate-v1 from v2 also failed) (no-rhg !)
+  abort: Too little data for dirstate: 16 bytes. (rhg !)
+  [255]
@@ -244,3 +244,4 @@ f14864fffdcab725d9eac6d4f4c07be05a35f59a
 83ea6ce48b4fd09fb79c4e34cc5750c805699a53 0 iQHNBAABCgA3FiEEH2b4zfZU6QXBHaBhoR4BzQ4F2VYFAmQ3860ZHGFscGhhcmVAcmFwaGFlbGdvbWVzLmRldgAKCRChHgHNDgXZVk3gDACIIcQxKfis/r5UNj7SqyFhQxUCo8Njp7zdLFv3CSWFdFiOpQONI7Byt9KjwedUkUK9tqdb03V7W32ZSBTrNLM11uHY9E5Aknjoza4m+aIGbamEVRWIIHXjUZEMKS9QcY8ElbDvvPu/xdZjyTEjNNiuByUpPUcJXVzpKrHm8Wy3GWDliYBuu68mzFIX3JnZKscdK4EjCAfDysSwwfLeBMpd0Rk+SgwjDwyPWAAyU3yDPNmlUn8qTGHjXxU3vsHCXpoJWkfKmQ9n++23WEpM9vC8zx2TIy70+gFUvKG77+Ucv+djQxHRv0L6L5qUSBJukD3R3nml1xu6pUeioBHepRmTUWgPbHa/gQ+J2Pw+rPCK51x0EeT0SJjxUR2mmMLbk8N2efM35lEjF/sNxotTq17Sv9bjwXhue6BURxpQDEyOuSaS0IlF56ndXtE/4FX3H6zgU1+3jw5iBWajr1E04QjPlSOJO7nIKYM9Jq3VpHR7MiFwfT46pJEfw9pNgZX2b8o=
 f952be90b0514a576dcc8bbe758ce3847faba9bb 0 iQHNBAABCgA3FiEEH2b4zfZU6QXBHaBhoR4BzQ4F2VYFAmQ+ZaoZHGFscGhhcmVAcmFwaGFlbGdvbWVzLmRldgAKCRChHgHNDgXZVuDOC/90SQ3UjXmByAaT5qr4bd3sVGt12lXlaKdyDxY0JMSKyHMUnb4YltHzNFxiUku10aRsRvJt5denTGeaOvAYbbXE7nbZJuyLD9rvfFTCe6EVx7kymCBwSbobKMzD79QHAFU7xu036gs7rmwyc++F4JF4IOrT4bjSYY5/8g0uLAHUexnn49QfQ5OYr325qShDFLjUZ7aH0yxA/gEr2MfXQmbIEc0eJJQXD1EhDkpSJFNIKzwWMOT1AhFk8kTlDqqbPnW7sDxTW+v/gGjAFYLHi8GMLEyrBQdEqytN7Pl9XOPXt/8RaDfIzYfl0OHxh2l1Y1MuH/PHrWO4PBPsr82QI2mxufYKuujpFMPr4PxXXl2g31OKhI8jJj+bHr62kGIOJCxZ8EPPGKXPGyoOuIVa0MeHmXxjb9kkj0SALjlaUvZrSENzRTsQXDNHQa+iDaITKLmItvLsaTEz9DJzGmI20shtJYcx4lqHsTgtMZfOtR5tmUknAFUUBZfUwvwULD4LmNI=
 fc445f8abcf90b33db7c463816a1b3560681767f 0 iQHNBAABCgA3FiEEH2b4zfZU6QXBHaBhoR4BzQ4F2VYFAmRTok8ZHGFscGhhcmVAcmFwaGFlbGdvbWVzLmRldgAKCRChHgHNDgXZVpZ5DACBv33k//ovzSbyH5/q+Xhk3TqNRY8IDOjoEhvDyu0bJHsvygOGXLUtHpQPth1RA4/c+AVNJrUeFvT02sLqqP2d9oSA9HEAYpOuzwgr1A+1o+Q2GyfD4cElP6KfiEe8oyFVOB0rfBgWNei1C0nnrhChQr5dOPR63uAFhHzkEsgsTFS7ONxZ1DHbe7gRV8OMMf1MatAtRzRexQJCqyNv7WodQdrKtjHqPKtlWl20dbwTHhzeiZbtjiTe0CVXVsOqnA1DQkO/IaiKQrn3zWdGY5ABbqQ1K0ceLcej4NFOeLo9ZrShndU3BuFUa9Dq9bnPYOI9wMqGoDh/GdTZkZEzBy5PTokY3AJHblbub49pi8YTenFcPdtd/v71AaNi3TKa45ZNhYVkPmRETYweHkLs3CIrSyeiBwU4RGuQZVD/GujAQB5yhk0w+LPMzBsHruD4vsgXwIraCzQIIJTjgyxKuAJGdGNUFYyxEpUkgz5G6MFrBKe8HO69y3Pm/qDNZ2maV8k=
+da372c745e0f053bb7a64e74cccd15810d96341d 0 iQHNBAABCgA3FiEEH2b4zfZU6QXBHaBhoR4BzQ4F2VYFAmSB7WkZHGFscGhhcmVAcmFwaGFlbGdvbWVzLmRldgAKCRChHgHNDgXZVoy+C/4zwO+Wxc3wr0aEzjVqAss7FuGS5e66H+0T3WzVgKIRMqiiOmUmmiNf+XloXlX4TOwoh9j9GNEpoZfV6TSwFSqV0LALaVIRRwrkJBDhnqw4eNBZbK5aBWNa2/21dkHecxF4KG3ai9kLwy2mtHxkDIy8T2LPvdx8pfNcYT4PZ19x2itqZLouBJqiZYehsqeMLNF2vRqkq+rQ+D2sFGLljgPo0JlpkOZ4IL7S/cqTOBG1sQ6KJK+hAE1kF1lhvK796VhKKXVnWVgqJLyg7ZI6168gxeFv5cyCtb+FUXJJ/5SOkxaCKJf3mg3DIYi3G7xjwB5CfUGW8A2qexgEjXeV42Mu7/Mkmn/aeTdL0UcRK3oBVHJwqt/fJlGFqVWt4/9g9KW5mJvTDQYBo/zjLyvKFEbnSLzhEP+9SvthCrtX0UYkKxOGi2M2Z7e9wgBB0gY8a36kA739lkNu6r3vH/FVh0aPTMWukLToELS90WgfViNr16lDnCeDjMgg97OKxWdOW6U=
@@ -260,3 +260,4 @@ f14864fffdcab725d9eac6d4f4c07be05a35f59a
 83ea6ce48b4fd09fb79c4e34cc5750c805699a53 6.4.1
 f952be90b0514a576dcc8bbe758ce3847faba9bb 6.4.2
 fc445f8abcf90b33db7c463816a1b3560681767f 6.4.3
+da372c745e0f053bb7a64e74cccd15810d96341d 6.4.4
@@ -79,8 +79,10 @@ for most needs.
 Bundle files can be generated with the :hg:`bundle` command. Typically
 :hg:`bundle --all` is used to produce a bundle of the entire repository.
 
-:hg:`debugcreatestreamclonebundle` can be used to produce a special
-*streaming clonebundle*. These are bundle files that are extremely efficient
+The bundlespec option `stream` (see :hg:`help bundlespec`)
+can be used to produce a special *streaming clonebundle*, typically using
+:hg:`bundle --all --type="none-streamv2"`.
+These are bundle files that are extremely efficient
 to produce and consume (read: fast). However, they are larger than
 traditional bundle formats and require that clients support the exact set
 of repository data store formats in use by the repository that created them.
@@ -52,6 +52,14 @@ def alter_bundle_url(repo, url):
     return url
 
 
+SUPPORTED_CLONEBUNDLE_SCHEMES = [
+    b"http://",
+    b"https://",
+    b"largefile://",
+    CLONEBUNDLESCHEME,
+]
+
+
 @attr.s
 class bundlespec:
     compression = attr.ib()
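For orientation, a minimal standalone sketch (not Mercurial code) of the check this list feeds: `filterclonebundleentries` below accepts an entry only when its URL starts with one of the supported schemes. `CLONEBUNDLESCHEME` is defined elsewhere in bundlecaches.py, so the sketch assumes only the three literal schemes.

```python
# Hedged sketch of the scheme filter added in this commit.
SUPPORTED = [b"http://", b"https://", b"largefile://"]


def is_supported_clonebundle_url(url: bytes) -> bool:
    # Mirrors the any(url.startswith(...)) test in filterclonebundleentries.
    return any(url.startswith(scheme) for scheme in SUPPORTED)


assert is_supported_clonebundle_url(b"https://example.com/full.hg")
assert not is_supported_clonebundle_url(b"weirdscheme://does.not.exist/bundle.hg")
```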
@@ -384,7 +392,9 @@ def isstreamclonespec(bundlespec):
     return False
 
 
-def filterclonebundleentries(repo, entries, streamclonerequested=False):
+def filterclonebundleentries(
+    repo, entries, streamclonerequested=False, pullbundles=False
+):
     """Remove incompatible clone bundle manifest entries.
 
     Accepts a list of entries parsed with ``parseclonebundlesmanifest``
@@ -396,6 +406,16 @@ def filterclonebundleentri
     """
     newentries = []
     for entry in entries:
+        url = entry.get(b'URL')
+        if not pullbundles and not any(
+            [url.startswith(scheme) for scheme in SUPPORTED_CLONEBUNDLE_SCHEMES]
+        ):
+            repo.ui.debug(
+                b'filtering %s because not a supported clonebundle scheme\n'
+                % url
+            )
+            continue
+
         spec = entry.get(b'BUNDLESPEC')
         if spec:
             try:
@@ -405,8 +425,7 @@ def filterclonebundleentri
                 # entries.
                 if streamclonerequested and not isstreamclonespec(bundlespec):
                     repo.ui.debug(
-                        b'filtering %s because not a stream clone\n'
-                        % entry[b'URL']
+                        b'filtering %s because not a stream clone\n' % url
                     )
                     continue
 
@@ -416,7 +435,7 @@ def filterclonebundleentri
             except error.UnsupportedBundleSpecification as e:
                 repo.ui.debug(
                     b'filtering %s because unsupported bundle '
-                    b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
+                    b'spec: %s\n' % (url, stringutil.forcebytestr(e))
                 )
                 continue
         # If we don't have a spec and requested a stream clone, we don't know
@@ -424,14 +443,12 @@ def filterclonebundleentri
         elif streamclonerequested:
             repo.ui.debug(
                 b'filtering %s because cannot determine if a stream '
-                b'clone bundle\n' % entry[b'URL']
+                b'clone bundle\n' % url
             )
             continue
 
         if b'REQUIRESNI' in entry and not sslutil.hassni:
-            repo.ui.debug(
-                b'filtering %s because SNI not supported\n' % entry[b'URL']
-            )
+            repo.ui.debug(b'filtering %s because SNI not supported\n' % url)
             continue
 
         if b'REQUIREDRAM' in entry:
@@ -439,15 +456,14 @@ def filterclonebundleentri
                 requiredram = util.sizetoint(entry[b'REQUIREDRAM'])
             except error.ParseError:
                 repo.ui.debug(
-                    b'filtering %s due to a bad REQUIREDRAM attribute\n'
-                    % entry[b'URL']
+                    b'filtering %s due to a bad REQUIREDRAM attribute\n' % url
                 )
                 continue
             actualram = repo.ui.estimatememory()
             if actualram is not None and actualram * 0.66 < requiredram:
                 repo.ui.debug(
                     b'filtering %s as it needs more than 2/3 of system memory\n'
-                    % entry[b'URL']
+                    % url
                 )
                 continue
 
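A hedged sketch of the REQUIREDRAM rule retained above: an entry is filtered out when it needs more than two thirds of the estimated system memory. The 0.66 factor and the None-means-unknown behavior come straight from the code; `fits_in_memory` is an illustrative name.

```python
def fits_in_memory(required_ram, actual_ram):
    # An unknown memory estimate (None) never filters an entry out.
    if actual_ram is None:
        return True
    return actual_ram * 0.66 >= required_ram


assert fits_in_memory(1 << 30, None)
assert fits_in_memory(2 << 30, 4 << 30)      # 2 GiB needed, 4 GiB present
assert not fits_in_memory(8 << 30, 4 << 30)  # needs more than 2/3 of RAM
```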
@@ -4,6 +4,7 @@
 # GNU General Public License version 2 or any later version.
 
 
+import struct
 from .i18n import _
 
 from . import (
@@ -151,9 +152,15 @@ class _dirstatemapcommon:
                     b'dirstate only has a docket in v2 format'
                 )
             self._set_identity()
-            self._docket = docketmod.DirstateDocket.parse(
-                self._readdirstatefile(), self._nodeconstants
-            )
+            try:
+                self._docket = docketmod.DirstateDocket.parse(
+                    self._readdirstatefile(), self._nodeconstants
+                )
+            except struct.error:
+                self._ui.debug(b"failed to read dirstate-v2 data")
+                raise error.CorruptedDirstate(
+                    b"failed to read dirstate-v2 data"
+                )
         return self._docket
 
     def _read_v2_data(self):
@@ -176,11 +183,23 @@ class _dirstatemapcommon:
         return self._opener.read(self.docket.data_filename())
 
     def write_v2_no_append(self, tr, st, meta, packed):
-        old_docket = self.docket
+        try:
+            old_docket = self.docket
+        except error.CorruptedDirstate:
+            # This means we've identified a dirstate-v1 file on-disk when we
+            # were expecting a dirstate-v2 docket. We've managed to recover
+            # from that unexpected situation, and now we want to write back a
+            # dirstate-v2 file to make the on-disk situation right again.
+            #
+            # This shouldn't be triggered since `self.docket` is cached and
+            # we would have called parents() or read() first, but it's here
+            # just in case.
+            old_docket = None
+
         new_docket = docketmod.DirstateDocket.with_new_uuid(
             self.parents(), len(packed), meta
         )
-        if old_docket.uuid == new_docket.uuid:
+        if old_docket is not None and old_docket.uuid == new_docket.uuid:
             raise error.ProgrammingError(b'dirstate docket name collision')
         data_filename = new_docket.data_filename()
         self._opener.write(data_filename, packed)
@@ -194,7 +213,7 @@ class _dirstatemapcommon:
             st.close()
         # Remove the old data file after the new docket pointing to
         # the new data file was written.
-        if old_docket.uuid:
+        if old_docket is not None and old_docket.uuid:
             data_filename = old_docket.data_filename()
             if tr is not None:
                 tr.addbackup(data_filename, location=b'plain')
@@ -211,28 +230,40 @@ class _dirstatemapcommon:
     def parents(self):
         if not self._parents:
             if self._use_dirstate_v2:
-                self._parents = self.docket.parents
+                try:
+                    self.docket
+                except error.CorruptedDirstate as e:
+                    # fall back to dirstate-v1 if we fail to read v2
+                    self._v1_parents(e)
+                else:
+                    self._parents = self.docket.parents
             else:
-                read_len = self._nodelen * 2
-                st = self._readdirstatefile(read_len)
-                l = len(st)
-                if l == read_len:
-                    self._parents = (
-                        st[: self._nodelen],
-                        st[self._nodelen : 2 * self._nodelen],
-                    )
-                elif l == 0:
-                    self._parents = (
-                        self._nodeconstants.nullid,
-                        self._nodeconstants.nullid,
-                    )
-                else:
-                    raise error.Abort(
-                        _(b'working directory state appears damaged!')
-                    )
+                self._v1_parents()
 
         return self._parents
 
+    def _v1_parents(self, from_v2_exception=None):
+        read_len = self._nodelen * 2
+        st = self._readdirstatefile(read_len)
+        l = len(st)
+        if l == read_len:
+            self._parents = (
+                st[: self._nodelen],
+                st[self._nodelen : 2 * self._nodelen],
+            )
+        elif l == 0:
+            self._parents = (
+                self._nodeconstants.nullid,
+                self._nodeconstants.nullid,
+            )
+        else:
+            hint = None
+            if from_v2_exception is not None:
+                hint = _(b"falling back to dirstate-v1 from v2 also failed")
+            raise error.Abort(
+                _(b'working directory state appears damaged!'), hint
+            )
+
 
 class dirstatemap(_dirstatemapcommon):
     """Map encapsulating the dirstate's contents.
@@ -330,11 +361,17 @@ class dirstatemap(_dirstatemapcommon):
     def read(self):
         testing.wait_on_cfg(self._ui, b'dirstate.pre-read-file')
         if self._use_dirstate_v2:
-
-            if not self.docket.uuid:
-                return
-            testing.wait_on_cfg(self._ui, b'dirstate.post-docket-read-file')
-            st = self._read_v2_data()
+            try:
+                self.docket
+            except error.CorruptedDirstate:
+                # fall back to dirstate-v1 if we fail to read v2
+                self._set_identity()
+                st = self._readdirstatefile()
+            else:
+                if not self.docket.uuid:
+                    return
+                testing.wait_on_cfg(self._ui, b'dirstate.post-docket-read-file')
+                st = self._read_v2_data()
         else:
             self._set_identity()
             st = self._readdirstatefile()
@@ -365,10 +402,17 @@ class dirstatemap(_dirstatemapcommon):
         #
         # (we cannot decorate the function directly since it is in a C module)
         if self._use_dirstate_v2:
-            p = self.docket.parents
-            meta = self.docket.tree_metadata
-            parse_dirstate = util.nogc(v2.parse_dirstate)
-            parse_dirstate(self._map, self.copymap, st, meta)
+            try:
+                self.docket
+            except error.CorruptedDirstate:
+                # fall back to dirstate-v1 if we fail to parse v2
+                parse_dirstate = util.nogc(parsers.parse_dirstate)
+                p = parse_dirstate(self._map, self.copymap, st)
+            else:
+                p = self.docket.parents
+                meta = self.docket.tree_metadata
+                parse_dirstate = util.nogc(v2.parse_dirstate)
+                parse_dirstate(self._map, self.copymap, st, meta)
         else:
             parse_dirstate = util.nogc(parsers.parse_dirstate)
             p = parse_dirstate(self._map, self.copymap, st)
@@ -597,38 +641,37 @@ if rustmod is not None:
 
             testing.wait_on_cfg(self._ui, b'dirstate.pre-read-file')
             if self._use_dirstate_v2:
-                self.docket  # load the data if needed
-                inode = (
-                    self.identity.stat.st_ino
-                    if self.identity is not None
-                    and self.identity.stat is not None
-                    else None
-                )
-                testing.wait_on_cfg(self._ui, b'dirstate.post-docket-read-file')
-                if not self.docket.uuid:
-                    data = b''
-                    self._map = rustmod.DirstateMap.new_empty()
-                else:
-                    data = self._read_v2_data()
-                    self._map = rustmod.DirstateMap.new_v2(
-                        data,
-                        self.docket.data_size,
-                        self.docket.tree_metadata,
-                        self.docket.uuid,
-                        inode,
-                    )
-                parents = self.docket.parents
+                try:
+                    self.docket
+                except error.CorruptedDirstate as e:
+                    # fall back to dirstate-v1 if we fail to read v2
+                    parents = self._v1_map(e)
+                else:
+                    parents = self.docket.parents
+                    inode = (
+                        self.identity.stat.st_ino
+                        if self.identity is not None
+                        and self.identity.stat is not None
+                        else None
+                    )
+                    testing.wait_on_cfg(
+                        self._ui, b'dirstate.post-docket-read-file'
+                    )
+                    if not self.docket.uuid:
+                        data = b''
+                        self._map = rustmod.DirstateMap.new_empty()
+                    else:
+                        data = self._read_v2_data()
+                        self._map = rustmod.DirstateMap.new_v2(
+                            data,
+                            self.docket.data_size,
+                            self.docket.tree_metadata,
+                            self.docket.uuid,
+                            inode,
+                        )
+                    parents = self.docket.parents
             else:
-                self._set_identity()
-                inode = (
-                    self.identity.stat.st_ino
-                    if self.identity is not None
-                    and self.identity.stat is not None
-                    else None
-                )
-                self._map, parents = rustmod.DirstateMap.new_v1(
-                    self._readdirstatefile(), inode
-                )
+                parents = self._v1_map()
 
             if parents and not self._dirtyparents:
                 self.setparents(*parents)
@@ -638,6 +681,23 @@ if rustmod is not None:
             self.get = self._map.get
             return self._map
 
+        def _v1_map(self, from_v2_exception=None):
+            self._set_identity()
+            inode = (
+                self.identity.stat.st_ino
+                if self.identity is not None and self.identity.stat is not None
+                else None
+            )
+            try:
+                self._map, parents = rustmod.DirstateMap.new_v1(
+                    self._readdirstatefile(), inode
+                )
+            except OSError as e:
+                if from_v2_exception is not None:
+                    raise e from from_v2_exception
+                raise
+            return parents
+
         @property
         def copymap(self):
             return self._map.copymap()
@@ -696,9 +756,15 @@ if rustmod is not None:
                 self._dirtyparents = False
                 return
 
+            write_mode = self._write_mode
+            try:
+                docket = self.docket
+            except error.CorruptedDirstate:
+                # fall back to dirstate-v1 if we fail to parse v2
+                docket = None
+
             # We can only append to an existing data file if there is one
-            write_mode = self._write_mode
-            if self.docket.uuid is None:
+            if docket is None or docket.uuid is None:
                 write_mode = WRITE_MODE_FORCE_NEW
             packed, meta, append = self._map.write_v2(write_mode)
             if append:
@@ -650,6 +650,13 @@ class CorruptedState(Exception):
     __bytes__ = _tobytes
 
 
+class CorruptedDirstate(Exception):
+    """error raised when the dirstate appears corrupted on-disk. It may be
+    due to a dirstate version mismatch (i.e. expecting v2 and finding v1
+    on disk)."""
+
+    __bytes__ = _tobytes
+
+
 class PeerTransportError(Abort):
     """Transport-level I/O error when communicating with a peer repo."""
 
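A condensed sketch of how the dirstatemap changes above consume this exception (names are illustrative, not Mercurial APIs): every docket access is wrapped so that corruption triggers the v1 fallback instead of a crash.

```python
class CorruptedDirstate(Exception):
    """Stand-in for mercurial.error.CorruptedDirstate."""


def read_with_fallback(read_v2, read_v1):
    # Mirrors the pattern installed around parents()/read()/_map above:
    # try the v2 docket first, fall back to the v1 reader on corruption.
    try:
        return read_v2()
    except CorruptedDirstate:
        return read_v1()


def bad_v2():
    raise CorruptedDirstate(b"failed to read dirstate-v2 data")


assert read_with_fallback(bad_v2, lambda: "v1-data") == "v1-data"
```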
@@ -67,6 +67,10 @@ The following bundle <compression> engin
 
 .. bundlecompressionmarker
 
+The compression engines can be prepended with ``stream`` to create a streaming bundle.
+These are bundles that are extremely efficient to produce and consume,
+but do not have guaranteed compatibility with older clients.
+
 Available Options
 =================
 
@@ -89,7 +93,6 @@ phases
 revbranchcache
     Include the "tags-fnodes" cache inside the bundle.
 
-
 tagsfnodescache
     Include the "tags-fnodes" cache inside the bundle.
 
@@ -109,3 +112,10 @@ Examples
 
 ``zstd-v1``
     This errors because ``zstd`` is not supported for ``v1`` types.
+
+``none-streamv2``
+    Produce a ``v2`` streaming bundle with no compression.
+
+``zstd-v2;obsolescence=true;phases=true``
+    Produce a ``v2`` bundle with zstandard compression which includes
+    obsolescence markers and phases.
@@ -615,8 +615,8 @@ class revlog:
             entry_point = b'%s.i.%s' % (self.radix, self.postfix)
         elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
             entry_point = b'%s.i.a' % self.radix
-        elif self._try_split and self.opener.exists(b'%s.i.s' % self.radix):
-            entry_point = b'%s.i.s' % self.radix
+        elif self._try_split and self.opener.exists(self._split_index_file):
+            entry_point = self._split_index_file
         else:
             entry_point = b'%s.i' % self.radix
 
@@ -2125,6 +2125,22 @@ class revlog:
                 raise error.CensoredNodeError(self.display_id, node, text)
             raise
 
+    @property
+    def _split_index_file(self):
+        """the path where to expect the index of an ongoing splitting operation
+
+        The file will only exist if a splitting operation is in progress, but
+        it is always expected at the same location."""
+        parts = os.path.split(self.radix)
+        if len(parts) > 1:
+            # adds a '-s' suffix to the ``data/`` or ``meta/`` base
+            head = parts[0] + b'-s'
+            return os.path.join(head, *parts[1:])
+        else:
+            # the revlog is stored at the root of the store (changelog or
+            # manifest), no risk of collision.
+            return self.radix + b'.i.s'
+
     def _enforceinlinesize(self, tr, side_write=True):
         """Check if the revlog is too big for inline and convert if so.
 
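A hedged illustration, in plain Python, of the directory branch above (POSIX paths assumed): suffixing the parent directory with '-s' keeps the temporary index outside any tracked directory's namespace, such as the `foo_bar.i.s/tutu` case added to the tests at the end of this changeset.

```python
import os

# For a radix like b'data/some_dir/sub_dir/foo_bar', os.path.split() yields
# (b'data/some_dir/sub_dir', b'foo_bar'); the b'-s' suffix on the head moves
# the temporary index into a sibling directory no tracked file can occupy.
radix = b'data/some_dir/sub_dir/foo_bar'
head, tail = os.path.split(radix)
assert os.path.join(head + b'-s', tail) == b'data/some_dir/sub_dir-s/foo_bar'
```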
@@ -2161,7 +2177,7 @@ class revlog:
         # this code
         if side_write:
             old_index_file_path = self._indexfile
-            new_index_file_path = self._indexfile + b'.s'
+            new_index_file_path = self._split_index_file
             opener = self.opener
             weak_self = weakref.ref(self)
 
@@ -1087,10 +1087,17 @@ class deltacomputer:
     ):
         self.revlog = revlog
         self._write_debug = write_debug
-        self._debug_search = debug_search
+        if write_debug is None:
+            self._debug_search = False
+        else:
+            self._debug_search = debug_search
         self._debug_info = debug_info
         self._snapshot_cache = SnapshotCache()
 
+    @property
+    def _gather_debug(self):
+        return self._write_debug is not None or self._debug_info is not None
+
     def buildtext(self, revinfo, fh):
         """Builds a fulltext version of a revision
 
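A small self-contained sketch of the gating introduced above (the class name is a stand-in): `_debug_search` is forced off when no `write_debug` callback exists to receive the messages, which is what lets the search loop test `self._debug_search` alone instead of recomputing the old two-part condition.

```python
class DeltaComputerStub:
    """Stand-in showing only the debug-flag handling from __init__ above."""

    def __init__(self, write_debug=None, debug_search=False, debug_info=None):
        self._write_debug = write_debug
        # debug_search is meaningless without somewhere to write messages.
        if write_debug is None:
            self._debug_search = False
        else:
            self._debug_search = debug_search
        self._debug_info = debug_info

    @property
    def _gather_debug(self):
        return self._write_debug is not None or self._debug_info is not None


assert DeltaComputerStub(debug_search=True)._debug_search is False
assert DeltaComputerStub(write_debug=print, debug_search=True)._debug_search is True
assert DeltaComputerStub(debug_info=[])._gather_debug is True
```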
@@ -1136,7 +1143,6 @@ class deltacomputer:
     def _builddeltainfo(self, revinfo, base, fh, target_rev=None):
         # can we use the cached delta?
         revlog = self.revlog
-        debug_search = self._write_debug is not None and self._debug_search
         chainbase = revlog.chainbase(base)
         if revlog._generaldelta:
             deltabase = base
@@ -1173,7 +1179,7 @@ class deltacomputer:
                 delta = revinfo.cachedelta[1]
         if delta is None:
             delta = self._builddeltadiff(base, revinfo, fh)
-        if debug_search:
+        if self._debug_search:
             msg = b"DBG-DELTAS-SEARCH: uncompressed-delta-size=%d\n"
             msg %= len(delta)
             self._write_debug(msg)
@@ -1181,17 +1187,17 @@ class deltacomputer:
         if revlog.upperboundcomp is not None and snapshotdepth:
             lowestrealisticdeltalen = len(delta) // revlog.upperboundcomp
             snapshotlimit = revinfo.textlen >> snapshotdepth
-            if debug_search:
+            if self._debug_search:
                 msg = b"DBG-DELTAS-SEARCH: projected-lower-size=%d\n"
                 msg %= lowestrealisticdeltalen
                 self._write_debug(msg)
             if snapshotlimit < lowestrealisticdeltalen:
-                if debug_search:
+                if self._debug_search:
                     msg = b"DBG-DELTAS-SEARCH: DISCARDED (snapshot limit)\n"
                     self._write_debug(msg)
                 return None
             if revlog.length(base) < lowestrealisticdeltalen:
-                if debug_search:
+                if self._debug_search:
                     msg = b"DBG-DELTAS-SEARCH: DISCARDED (prev size)\n"
                     self._write_debug(msg)
                 return None
@@ -1253,41 +1259,34 @@ class deltacomputer:
         if target_rev is None:
             target_rev = len(self.revlog)
 
-        if not revinfo.textlen:
-            return self._fullsnapshotinfo(fh, revinfo, target_rev)
+        gather_debug = self._gather_debug
+        cachedelta = revinfo.cachedelta
+        revlog = self.revlog
+        p1r = p2r = None
 
         if excluded_bases is None:
             excluded_bases = set()
 
-        # no delta for flag processor revision (see "candelta" for why)
-        # not calling candelta since only one revision needs test, also to
-        # avoid overhead fetching flags again.
-        if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
-            return self._fullsnapshotinfo(fh, revinfo, target_rev)
-
-        gather_debug = (
-            self._write_debug is not None or self._debug_info is not None
-        )
-        debug_search = self._write_debug is not None and self._debug_search
-
         if gather_debug:
             start = util.timer()
-
-        # count the number of different delta we tried (for debug purpose)
-        dbg_try_count = 0
-        # count the number of "search round" we did. (for debug purpose)
-        dbg_try_rounds = 0
-        dbg_type = b'unknown'
-
-        cachedelta = revinfo.cachedelta
-        p1 = revinfo.p1
-        p2 = revinfo.p2
-        revlog = self.revlog
-
-        deltainfo = None
-        p1r, p2r = revlog.rev(p1), revlog.rev(p2)
-
-        if gather_debug:
+            dbg = self._one_dbg_data()
+            dbg['revision'] = target_rev
+            target_revlog = b"UNKNOWN"
+            target_type = self.revlog.target[0]
+            target_key = self.revlog.target[1]
+            if target_type == KIND_CHANGELOG:
+                target_revlog = b'CHANGELOG:'
+            elif target_type == KIND_MANIFESTLOG:
+                target_revlog = b'MANIFESTLOG:'
+                if target_key:
+                    target_revlog += b'%s:' % target_key
+            elif target_type == KIND_FILELOG:
+                target_revlog = b'FILELOG:'
+                if target_key:
+                    target_revlog += b'%s:' % target_key
+            dbg['target-revlog'] = target_revlog
+            p1r = revlog.rev(revinfo.p1)
+            p2r = revlog.rev(revinfo.p2)
             if p1r != nullrev:
                 p1_chain_len = revlog._chaininfo(p1r)[0]
             else:
@@ -1296,7 +1295,109 @@ class deltacomputer:
                 p2_chain_len = revlog._chaininfo(p2r)[0]
             else:
                 p2_chain_len = -1
-        if debug_search:
+            dbg['p1-chain-len'] = p1_chain_len
+            dbg['p2-chain-len'] = p2_chain_len
+
+        # 1) if the revision is empty, no amount of delta can beat it
+        #
+        # 2) no delta for flag processor revision (see "candelta" for why)
+        # not calling candelta since only one revision needs test, also to
+        # avoid overhead fetching flags again.
+        if not revinfo.textlen or revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
+            deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
+            if gather_debug:
+                end = util.timer()
+                dbg['duration'] = end - start
+                dbg[
+                    'delta-base'
+                ] = deltainfo.base  # pytype: disable=attribute-error
+                dbg['search_round_count'] = 0
+                dbg['using-cached-base'] = False
+                dbg['delta_try_count'] = 0
+                dbg['type'] = b"full"
+                dbg['snapshot-depth'] = 0
+                self._dbg_process_data(dbg)
+            return deltainfo
+
+        deltainfo = None
+
+        # If this source delta is to be forcibly reused, let us comply early.
+        if (
+            revlog._generaldelta
+            and revinfo.cachedelta is not None
+            and revinfo.cachedelta[2] == DELTA_BASE_REUSE_FORCE
+        ):
+            base = revinfo.cachedelta[0]
+            if base == nullrev:
+                dbg_type = b"full"
+                deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
+                if gather_debug:
+                    snapshotdepth = 0
+            elif base not in excluded_bases:
+                delta = revinfo.cachedelta[1]
+                header, data = revlog.compress(delta)
+                deltalen = len(header) + len(data)
+                if gather_debug:
+                    offset = revlog.end(len(revlog) - 1)
+                    chainbase = revlog.chainbase(base)
+                    distance = deltalen + offset - revlog.start(chainbase)
+                    chainlen, compresseddeltalen = revlog._chaininfo(base)
+                    chainlen += 1
+                    compresseddeltalen += deltalen
+                    if base == p1r or base == p2r:
+                        dbg_type = b"delta"
+                        snapshotdepth = None
+                    elif not revlog.issnapshot(base):
+                        snapshotdepth = None
+                    else:
+                        dbg_type = b"snapshot"
+                        snapshotdepth = revlog.snapshotdepth(base) + 1
+                else:
+                    distance = None
+                    chainbase = None
+                    chainlen = None
+                    compresseddeltalen = None
+                    snapshotdepth = None
+                deltainfo = _deltainfo(
+                    distance=distance,
+                    deltalen=deltalen,
+                    data=(header, data),
+                    base=base,
+                    chainbase=chainbase,
+                    chainlen=chainlen,
+                    compresseddeltalen=compresseddeltalen,
+                    snapshotdepth=snapshotdepth,
+                )
+
+        if deltainfo is not None:
+            if gather_debug:
+                end = util.timer()
+                dbg['duration'] = end - start
+                dbg[
+                    'delta-base'
+                ] = deltainfo.base  # pytype: disable=attribute-error
+                dbg['search_round_count'] = 0
+                dbg['using-cached-base'] = True
+                dbg['delta_try_count'] = 0
+                dbg['type'] = b"full"
+                if snapshotdepth is None:
+                    dbg['snapshot-depth'] = 0
+                else:
+                    dbg['snapshot-depth'] = snapshotdepth
+            self._dbg_process_data(dbg)
+            return deltainfo
+
+        # count the number of different deltas we tried (for debug purposes)
+        dbg_try_count = 0
+        # count the number of "search rounds" we did (for debug purposes)
+        dbg_try_rounds = 0
+        dbg_type = b'unknown'
+
+        if p1r is None:
+            p1r = revlog.rev(revinfo.p1)
+            p2r = revlog.rev(revinfo.p2)
+
+        if self._debug_search:
             msg = b"DBG-DELTAS-SEARCH: SEARCH rev=%d\n"
             msg %= target_rev
             self._write_debug(msg)
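A hedged sketch of the forced-reuse decision added above (constant values are stand-ins; the real logic also requires `generaldelta` and lives inside `finddeltainfo`): a cached delta carrying the "force" policy is honoured unless its base was excluded, and a null base degrades to a full snapshot.

```python
NULLREV = -1
DELTA_BASE_REUSE_FORCE = 2  # stand-in value for illustration only


def forced_reuse_outcome(cachedelta, excluded_bases):
    base, policy = cachedelta
    if policy != DELTA_BASE_REUSE_FORCE:
        return "regular-search"
    if base == NULLREV:
        return "full-snapshot"       # no delta can be made against null
    if base in excluded_bases:
        return "regular-search"      # forced base unusable, search anyway
    return "reuse-cached-delta"


assert forced_reuse_outcome((NULLREV, DELTA_BASE_REUSE_FORCE), set()) == "full-snapshot"
assert forced_reuse_outcome((7, DELTA_BASE_REUSE_FORCE), {7}) == "regular-search"
assert forced_reuse_outcome((7, DELTA_BASE_REUSE_FORCE), set()) == "reuse-cached-delta"
```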
@@ -1314,7 +1415,7 @@ class deltacomputer:
         candidaterevs = next(groups)
         while candidaterevs is not None:
             dbg_try_rounds += 1
-            if debug_search:
+            if self._debug_search:
                 prev = None
                 if deltainfo is not None:
                     prev = deltainfo.base
@@ -1325,7 +1426,7 @@ class deltacomputer:
                     and cachedelta[0] in candidaterevs
                 ):
                     round_type = b"cached-delta"
-                elif p1 in candidaterevs or p2 in candidaterevs:
+                elif p1r in candidaterevs or p2r in candidaterevs:
                     round_type = b"parents"
                 elif prev is not None and all(c < prev for c in candidaterevs):
                     round_type = b"refine-down"
@@ -1338,7 +1439,7 @@ class deltacomputer:
                 self._write_debug(msg)
             nominateddeltas = []
             if deltainfo is not None:
-                if debug_search:
+                if self._debug_search:
                     msg = (
                         b"DBG-DELTAS-SEARCH: CONTENDER: rev=%d - length=%d\n"
                     )
@@ -1348,14 +1449,14 @@ class deltacomputer:
                 # challenge it against refined candidates
                 nominateddeltas.append(deltainfo)
             for candidaterev in candidaterevs:
-                if debug_search:
+                if self._debug_search:
                     msg = b"DBG-DELTAS-SEARCH: CANDIDATE: rev=%d\n"
                     msg %= candidaterev
                     self._write_debug(msg)
                 candidate_type = None
-                if candidaterev == p1:
+                if candidaterev == p1r:
                     candidate_type = b"p1"
-                elif candidaterev == p2:
+                elif candidaterev == p2r:
                     candidate_type = b"p2"
                 elif self.revlog.issnapshot(candidaterev):
                     candidate_type = b"snapshot-%d"
@@ -1376,7 +1477,7 @@ class deltacomputer:
 
                 dbg_try_count += 1
 
-                if debug_search:
+                if self._debug_search:
                     delta_start = util.timer()
                 candidatedelta = self._builddeltainfo(
                     revinfo,
@@ -1384,23 +1485,23 @@ class deltacomputer:
                     fh,
                     target_rev=target_rev,
                 )
-                if debug_search:
+                if self._debug_search:
                     delta_end = util.timer()
                     msg = b"DBG-DELTAS-SEARCH: delta-search-time=%f\n"
                     msg %= delta_end - delta_start
                     self._write_debug(msg)
                 if candidatedelta is not None:
                     if is_good_delta_info(self.revlog, candidatedelta, revinfo):
-                        if debug_search:
+                        if self._debug_search:
                             msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (GOOD)\n"
                             msg %= candidatedelta.deltalen
                             self._write_debug(msg)
                         nominateddeltas.append(candidatedelta)
-                    elif debug_search:
+                    elif self._debug_search:
                         msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (BAD)\n"
                         msg %= candidatedelta.deltalen
                         self._write_debug(msg)
-                elif debug_search:
+                elif self._debug_search:
                     msg = b"DBG-DELTAS-SEARCH: NO-DELTA\n"
                     self._write_debug(msg)
             if nominateddeltas:
@@ -1434,17 +1535,14 @@ class deltacomputer:
                 and dbg_try_count == 1
                 and deltainfo.base == cachedelta[0]
             )
-            dbg = {
-                'duration': end - start,
-                'revision': target_rev,
-                'delta-base': deltainfo.base,  # pytype: disable=attribute-error
-                'search_round_count': dbg_try_rounds,
-                'using-cached-base': used_cached,
-                'delta_try_count': dbg_try_count,
-                'type': dbg_type,
-                'p1-chain-len': p1_chain_len,
-                'p2-chain-len': p2_chain_len,
-            }
+            dbg['duration'] = end - start
+            dbg[
+                'delta-base'
+            ] = deltainfo.base  # pytype: disable=attribute-error
+            dbg['search_round_count'] = dbg_try_rounds
+            dbg['using-cached-base'] = used_cached
+            dbg['delta_try_count'] = dbg_try_count
+            dbg['type'] = dbg_type
             if (
                 deltainfo.snapshotdepth  # pytype: disable=attribute-error
                 is not None
@@ -1454,55 +1552,58 @@ class deltacomputer:
                 ] = deltainfo.snapshotdepth  # pytype: disable=attribute-error
             else:
                 dbg['snapshot-depth'] = 0
-            target_revlog = b"UNKNOWN"
-            target_type = self.revlog.target[0]
-            target_key = self.revlog.target[1]
-            if target_type == KIND_CHANGELOG:
-                target_revlog = b'CHANGELOG:'
-            elif target_type == KIND_MANIFESTLOG:
-                target_revlog = b'MANIFESTLOG:'
-                if target_key:
-                    target_revlog += b'%s:' % target_key
-            elif target_type == KIND_FILELOG:
-                target_revlog = b'FILELOG:'
-                if target_key:
-                    target_revlog += b'%s:' % target_key
-            dbg['target-revlog'] = target_revlog
+            self._dbg_process_data(dbg)
+        return deltainfo
 
-        if self._debug_info is not None:
-            self._debug_info.append(dbg)
+    def _one_dbg_data(self):
+        return {
+            'duration': None,
+            'revision': None,
+            'delta-base': None,
+            'search_round_count': None,
+            'using-cached-base': None,
+            'delta_try_count': None,
+            'type': None,
+            'p1-chain-len': None,
+            'p2-chain-len': None,
+            'snapshot-depth': None,
+            'target-revlog': None,
+        }
+
+    def _dbg_process_data(self, dbg):
+        if self._debug_info is not None:
+            self._debug_info.append(dbg)
 
         if self._write_debug is not None:
             msg = (
                 b"DBG-DELTAS:"
                 b" %-12s"
                 b" rev=%d:"
                 b" delta-base=%d"
                 b" is-cached=%d"
                 b" - search-rounds=%d"
                 b" try-count=%d"
                 b" - delta-type=%-6s"
                 b" snap-depth=%d"
                 b" - p1-chain-length=%d"
                 b" p2-chain-length=%d"
                 b" - duration=%f"
                 b"\n"
             )
             msg %= (
                 dbg["target-revlog"],
                 dbg["revision"],
                 dbg["delta-base"],
                 dbg["using-cached-base"],
                 dbg["search_round_count"],
                 dbg["delta_try_count"],
                 dbg["type"],
                 dbg["snapshot-depth"],
                 dbg["p1-chain-len"],
                 dbg["p2-chain-len"],
                 dbg["duration"],
             )
             self._write_debug(msg)
-        return deltainfo
 
 
 def delta_compression(default_compression_header, deltainfo):
@@ -50,8 +50,8 @@ templatefunc = registrar.templatefunc(fu
 
 @templatefunc(b'date(date[, fmt])')
 def date(context, mapping, args):
-    """Format a date. See :hg:`help dates` for formatting
-    strings. The default is a Unix date format, including the timezone:
+    """Format a date. The format string uses the Python strftime format.
+    The default is a Unix date format, including the timezone:
     "Mon Sep 04 15:13:13 2006 0700"."""
     if not (1 <= len(args) <= 2):
         # i18n: "date" is a keyword
@@ -290,6 +290,8 @@ class transaction(util.transactional):
         self._backupjournal = b"%s.backupfiles" % self._journal
         self._backupsfile = opener.open(self._backupjournal, b'w')
         self._backupsfile.write(b'%d\n' % version)
+        # the set of temporary files
+        self._tmp_files = set()
 
         if createmode is not None:
             opener.chmod(self._journal, createmode & 0o666)
@@ -354,6 +356,7 @@ class transaction(util.transactional):
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
+            or file in self._tmp_files
         ):
             return
         if self._queue:
@@ -368,6 +371,7 @@ class transaction(util.transactional):
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
+            or file in self._tmp_files
         ):
             return
         if offset:
@@ -439,6 +443,7 @@ class transaction(util.transactional):
         Such files will be deleted when the transaction exits (on both
         failure and success).
         """
+        self._tmp_files.add(tmpfile)
         self._addbackupentry((location, b'', tmpfile, False))
 
     @active
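A minimal sketch of the bookkeeping these three transaction hunks add (the class is illustrative, not the real transaction API): files registered through `addtempfile` are remembered so later `add`/`addbackup` calls on the same path return early instead of journaling the file twice.

```python
class TinyTransaction:
    """Toy model of the _tmp_files tracking added in this commit."""

    def __init__(self):
        self._tmp_files = set()
        self._journal = []

    def addtempfile(self, tmpfile):
        # Temporary files are deleted on transaction exit either way.
        self._tmp_files.add(tmpfile)

    def add(self, file, offset=0):
        if file in self._tmp_files:
            return  # already tracked as temporary, nothing to journal
        self._journal.append((file, offset))


t = TinyTransaction()
t.addtempfile(b'data-s/file')
t.add(b'data-s/file')
assert t._journal == []
```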
@@ -437,7 +437,7 @@ def find_pullbundle(repo, proto, opts, c
     if not manifest:
         return None
     res = bundlecaches.parseclonebundlesmanifest(repo, manifest)
-    res = bundlecaches.filterclonebundleentries(repo, res)
+    res = bundlecaches.filterclonebundleentries(repo, res, pullbundles=True)
     if not res:
         return None
     cl = repo.unfiltered().changelog
@@ -1,3 +1,14 @@
+= Mercurial 6.4.4 =
+
+ * clonebundles: filter out invalid schemes instead of failing on them
+ * doc: format argument for date uses strftime format string (issue6818)
+ * test: make test-contrib-perf.t more robust
+ * revlog: fix a bug in revlog splitting
+ * bundles: clarify streaming v2 bundle usage
+ * delta-find: fix pulled-delta-reuse-policy=forced behavior
+ * dirstate: fall back to v1 if reading v2 failed
+ * revlog: avoid possible collision between directory and temporary index
+
 = Mercurial 6.4.3 =
 
  * chg: declare environ (issue6812)
@@ -232,7 +232,17 @@ impl Repo {
         try_with_lock_no_wait(self.hg_vfs(), "wlock", f)
     }
 
-    pub fn has_dirstate_v2(&self) -> bool {
+    /// Whether this repo should use dirstate-v2.
+    /// The presence of `dirstate-v2` in the requirements does not mean that
+    /// the on-disk dirstate is necessarily in version 2. In most cases,
+    /// a dirstate-v2 file will indeed be found, but in rare cases (like the
+    /// upgrade mechanism being cut short), the on-disk version will be a
+    /// v1 file.
+    /// Semantically, having a requirement only means that a client cannot
+    /// properly understand or properly update the repo if it lacks support
+    /// for the required feature, not that the feature is actually used on
+    /// every occasion.
+    pub fn use_dirstate_v2(&self) -> bool {
         self.requirements
             .contains(requirements::DIRSTATE_V2_REQUIREMENT)
     }
@@ -277,10 +287,21 @@ impl Repo {
         let dirstate = self.dirstate_file_contents()?;
         let parents = if dirstate.is_empty() {
             DirstateParents::NULL
-        } else if self.has_dirstate_v2() {
-            let docket =
-                crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
-            docket.parents()
+        } else if self.use_dirstate_v2() {
+            let docket_res =
+                crate::dirstate_tree::on_disk::read_docket(&dirstate);
+            match docket_res {
+                Ok(docket) => docket.parents(),
+                Err(_) => {
+                    log::info!(
+                        "Parsing dirstate docket failed, \
+                        falling back to dirstate-v1"
+                    );
+                    *crate::dirstate::parsers::parse_dirstate_parents(
+                        &dirstate,
+                    )?
+                }
+            }
         } else {
             *crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
         };
@@ -296,7 +317,7 @@ impl Repo {
         &self,
     ) -> Result<DirstateMapIdentity, HgError> {
         assert!(
-            self.has_dirstate_v2(),
+            self.use_dirstate_v2(),
             "accessing dirstate data file ID without dirstate-v2"
         );
         // Get the identity before the contents since we could have a race
@@ -308,15 +329,35 @@ impl Repo {
             self.dirstate_parents.set(DirstateParents::NULL);
             Ok((identity, None, 0))
         } else {
-            let docket =
-                crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
-            self.dirstate_parents.set(docket.parents());
-            Ok((identity, Some(docket.uuid.to_owned()), docket.data_size()))
+            let docket_res =
+                crate::dirstate_tree::on_disk::read_docket(&dirstate);
+            match docket_res {
+                Ok(docket) => {
+                    self.dirstate_parents.set(docket.parents());
+                    Ok((
+                        identity,
+                        Some(docket.uuid.to_owned()),
+                        docket.data_size(),
+                    ))
+                }
+                Err(_) => {
+                    log::info!(
+                        "Parsing dirstate docket failed, \
+                        falling back to dirstate-v1"
+                    );
+                    let parents =
+                        *crate::dirstate::parsers::parse_dirstate_parents(
+                            &dirstate,
+                        )?;
+                    self.dirstate_parents.set(parents);
+                    Ok((identity, None, 0))
+                }
+            }
         }
     }
 
     fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
-        if self.has_dirstate_v2() {
+        if self.use_dirstate_v2() {
             // The v2 dirstate is split into a docket and a data file.
             // Since we don't always take the `wlock` to read it
             // (like in `hg status`), it is susceptible to races.
@@ -343,7 +384,13 @@ impl Repo {
                         );
                         continue;
                     }
-                    _ => return Err(e),
+                    _ => {
+                        log::info!(
+                            "Reading dirstate v2 failed, \
+                            falling back to v1"
+                        );
+                        return self.new_dirstate_map_v1();
+                    }
                 },
             }
         }
@@ -354,23 +401,22 @@ impl Repo {
             );
             Err(DirstateError::Common(error))
         } else {
-            debug_wait_for_file_or_print(
-                self.config(),
-                "dirstate.pre-read-file",
-            );
-            let identity = self.dirstate_identity()?;
-            let dirstate_file_contents = self.dirstate_file_contents()?;
-            if dirstate_file_contents.is_empty() {
-                self.dirstate_parents.set(DirstateParents::NULL);
-                Ok(OwningDirstateMap::new_empty(Vec::new()))
-            } else {
-                let (map, parents) = OwningDirstateMap::new_v1(
-                    dirstate_file_contents,
-                    identity,
-                )?;
-                self.dirstate_parents.set(parents);
-                Ok(map)
-            }
+            self.new_dirstate_map_v1()
+        }
+    }
+
+    fn new_dirstate_map_v1(&self) -> Result<OwningDirstateMap, DirstateError> {
+        debug_wait_for_file_or_print(self.config(), "dirstate.pre-read-file");
+        let identity = self.dirstate_identity()?;
+        let dirstate_file_contents = self.dirstate_file_contents()?;
+        if dirstate_file_contents.is_empty() {
+            self.dirstate_parents.set(DirstateParents::NULL);
+            Ok(OwningDirstateMap::new_empty(Vec::new()))
+        } else {
+            let (map, parents) =
+                OwningDirstateMap::new_v1(dirstate_file_contents, identity)?;
+            self.dirstate_parents.set(parents);
+            Ok(map)
         }
     }
 
@@ -550,7 +596,7 @@ impl Repo {
         // TODO: Maintain a `DirstateMap::dirty` flag, and return early here if
        // it’s unset
         let parents = self.dirstate_parents()?;
-        let (packed_dirstate, old_uuid_to_remove) = if self.has_dirstate_v2() {
+        let (packed_dirstate, old_uuid_to_remove) = if self.use_dirstate_v2() {
             let (identity, uuid, data_size) =
                 self.get_dirstate_data_file_integrity()?;
             let identity_changed = identity != map.old_identity();
@@ -59,6 +59,20 @@ Manifest file with invalid URL aborts
   (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
   [255]
 
+Manifest file with URL with unknown scheme skips the URL
+  $ echo 'weirdscheme://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
+  $ hg clone http://localhost:$HGPORT unknown-scheme
+  no compatible clone bundles available on server; falling back to regular clone
+  (you may want to report this to the server operator)
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files
+  new changesets 53245c60e682:aaff8d2ffbbf
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
 Server is not running aborts
 
   $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
@@ -304,20 +304,20 b' Simple single entry'
304
304
305 Multiple entries
305 Multiple entries
306
306
307 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
307 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-50'
308 ! wall * comb * user * sys * (best of 5) (glob)
308 ! wall * comb * user * sys * (best of 50) (glob)
309
309
310 error case are ignored
310 error case are ignored
311
311
312 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
312 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-50'
313 malformatted run limit entry, missing "-": 500
313 malformatted run limit entry, missing "-": 500
314 ! wall * comb * user * sys * (best of 5) (glob)
314 ! wall * comb * user * sys * (best of 50) (glob)
315 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
315 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-120, 0.000000001-50'
316 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12
316 malformatted run limit entry, could not convert string to float: 'aaa': aaa-120
317 ! wall * comb * user * sys * (best of 5) (glob)
317 ! wall * comb * user * sys * (best of 50) (glob)
318 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
318 $ hg perfparents --config perf.stub=no --config perf.run-limits='120-aaaaaa, 0.000000001-50'
319 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
319 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 120-aaaaaa
320 ! wall * comb * user * sys * (best of 5) (glob)
320 ! wall * comb * user * sys * (best of 50) (glob)
321
321
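Each perf.run-limits entry is a "<seconds>-<runs>" pair, and malformed entries only produce the warnings shown above before being ignored. A sketch of that tolerant parsing, close in spirit (but not identical) to the code in contrib/perf.py:

    def parse_run_limits(spec):
        limits = []
        for entry in spec.split(','):
            entry = entry.strip()
            parts = entry.split('-', 1)
            if len(parts) < 2:
                # matches the 'missing "-"' warning in the test above
                print('malformatted run limit entry, missing "-": %s' % entry)
                continue
            try:
                seconds = float(parts[0])
            except ValueError as exc:
                print('malformatted run limit entry, %s: %s' % (exc, entry))
                continue
            try:
                runs = int(parts[1])
            except ValueError as exc:
                print('malformatted run limit entry, %s: %s' % (exc, entry))
                continue
            limits.append((seconds, runs))
        return limits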
322 test actual output
322 test actual output
323 ------------------
323 ------------------
@@ -329,5 +329,34 b' more subtle to test this behavior.'
329 DBG-DELTAS: CHANGELOG: * (glob)
329 DBG-DELTAS: CHANGELOG: * (glob)
330 DBG-DELTAS: MANIFESTLOG: * (glob)
330 DBG-DELTAS: MANIFESTLOG: * (glob)
331 DBG-DELTAS: MANIFESTLOG: * (glob)
331 DBG-DELTAS: MANIFESTLOG: * (glob)
332 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
332 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 is-cached=1 *search-rounds=0 try-count=0* (glob)
333 DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=3 * (glob)
333 DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=3 is-cached=1 *search-rounds=0 try-count=0* (glob)
334
335 Check that running "forced" on a non-general delta repository does not corrupt it
336 ---------------------------------------------------------------------------------
337
338 Even if their reuse is requested, some of the incoming deltas cannot be stored in the revlog of a non-general-delta repository. We check that the bundle application was still correct.
339
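Without general delta, an on-disk delta may only be stored against the immediately preceding revision, so a forced base that violates this constraint triggers a fresh delta search (the search-rounds=1 try-count=1 seen below). A one-line illustration of the constraint, not Mercurial's actual code:

    def incoming_base_is_storable(base_rev, target_rev, generaldelta):
        # With general delta any base is representable on disk; without
        # it, only a delta against the previous revision can be stored.
        return generaldelta or base_rev == target_rev - 1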
340 $ hg init \
341 > --config format.usegeneraldelta=no \
342 > --config format.sparse-revlog=no \
343 > local-forced-full-p1-no-gd
344 $ hg debugformat -R local-forced-full-p1-no-gd | grep generaldelta
345 generaldelta: no
346 $ hg -R local-forced-full-p1-no-gd pull --quiet local-pre-pull-full \
347 > --config debug.revlog.debug-delta=no
348 $ hg -R local-forced-full-p1-no-gd pull --quiet \
349 > --config 'paths.*:pulled-delta-reuse-policy=forced' all-p1.hg
350 DBG-DELTAS: CHANGELOG: * (glob)
351 DBG-DELTAS: CHANGELOG: * (glob)
352 DBG-DELTAS: MANIFESTLOG: * (glob)
353 DBG-DELTAS: MANIFESTLOG: * (glob)
354 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=0 * - search-rounds=1 try-count=1 * (glob)
355 DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=4 * - search-rounds=1 try-count=1 * (glob)
356 $ hg -R local-forced-full-p1-no-gd verify
357 checking changesets
358 checking manifests
359 crosschecking files in changesets and manifests
360 checking files
361 checking dirstate
362 checked 5 changesets with 5 changes to 1 files
@@ -84,6 +84,8 b' setup a repository for tests'
84 > Directory_With,Special%Char/Complex_File.babar
84 > Directory_With,Special%Char/Complex_File.babar
85 > foo/bar/babar_celeste/foo
85 > foo/bar/babar_celeste/foo
86 > 1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/f
86 > 1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/f
87 > some_dir/sub_dir/foo_bar
88 > some_dir/sub_dir/foo_bar.i.s/tutu
87 > "
89 > "
88 $ for f in $files; do
90 $ for f in $files; do
89 > mkdir -p `dirname $f`
91 > mkdir -p `dirname $f`
@@ -104,13 +106,17 b' setup a repository for tests'
104 > dd if=/dev/zero of=$f bs=1k count=128 > /dev/null 2>&1
106 > dd if=/dev/zero of=$f bs=1k count=128 > /dev/null 2>&1
105 > done
107 > done
106 $ hg commit -AqmD --traceback
108 $ hg commit -AqmD --traceback
109 $ for f in $files; do
110 > dd if=/dev/zero of=$f bs=1k count=132 > /dev/null 2>&1
111 > done
112 $ hg commit -AqmD --traceback
107
113
108 Reference size:
114 Reference size:
109 $ f -s file
115 $ f -s file
110 file: size=131072
116 file: size=135168
111 $ f -s .hg/store/data/file*
117 $ f -s .hg/store/data*/file*
112 .hg/store/data/file.d: size=132139
118 .hg/store/data/file.d: size=267307
113 .hg/store/data/file.i: size=256
119 .hg/store/data/file.i: size=320
114
120
115 $ cd ..
121 $ cd ..
116
122
@@ -134,16 +140,16 b' Reference size:'
134 adding changesets
140 adding changesets
135 adding manifests
141 adding manifests
136 adding file changes
142 adding file changes
137 added 2 changesets with 8 changes to 4 files
143 added 3 changesets with 18 changes to 6 files
138 new changesets 16a630ece54e:8437c461d70a
144 new changesets c99a94cae9b1:64874a3b0160
139 (run 'hg update' to get a working copy)
145 (run 'hg update' to get a working copy)
140
146
141
147
142 The inline revlog has been replaced
148 The inline revlog has been replaced
143
149
144 $ f -s .hg/store/data/file*
150 $ f -s .hg/store/data/file*
145 .hg/store/data/file.d: size=132139
151 .hg/store/data/file.d: size=267307
146 .hg/store/data/file.i: size=256
152 .hg/store/data/file.i: size=320
147
153
148
154
149 $ hg verify -q
155 $ hg verify -q
@@ -171,7 +177,7 b' but truncate the index and the data to r'
171 Reference size:
177 Reference size:
172 $ f -s file
178 $ f -s file
173 file: size=1024
179 file: size=1024
174 $ f -s .hg/store/data/file*
180 $ f -s .hg/store/data*/file*
175 .hg/store/data/file.i: size=1174
181 .hg/store/data/file.i: size=1174
176
182
177 $ cat > .hg/hgrc <<EOF
183 $ cat > .hg/hgrc <<EOF
@@ -192,10 +198,13 b' Reference size:'
192
198
193 The inline revlog still exists, but a split version exists next to it
199 The inline revlog still exists, but a split version exists next to it
194
200
195 $ f -s .hg/store/data/file*
201 $ cat .hg/store/journal | tr '\0' ' ' | grep '\.s'
196 .hg/store/data/file.d: size=132139
202 data/some_dir/sub_dir/foo_bar.i.s/tutu.i 1174
203 data/some_dir/sub_dir/foo_bar.i.s/tutu.d 0
204 $ f -s .hg/store/data*/file*
205 .hg/store/data-s/file: size=320
206 .hg/store/data/file.d: size=267307
197 .hg/store/data/file.i: size=132395
207 .hg/store/data/file.i: size=132395
198 .hg/store/data/file.i.s: size=256
199
208
200
209
201 The first file.i entry should match the "Reference size" above.
210 The first file.i entry should match the "Reference size" above.
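An inline revlog keeps its data interleaved in the .i file and is split into an index/data pair once it grows too large; the journal entries above are what let the transaction undo an interrupted split. A heavily simplified sketch of the split step, with hypothetical helper names:

    def split_inline_revlog(open_store_file, radix, index_entries, chunks):
        # Write all revision chunks to a separate .d file, then rewrite
        # the .i file with fixed-size index entries only. A real
        # transaction journals both paths first so `hg recover` can
        # roll this back.
        with open_store_file(radix + '.d', 'wb') as datafile:
            for chunk in chunks:
                datafile.write(chunk)
        with open_store_file(radix + '.i', 'wb') as indexfile:
            for entry in index_entries:
                indexfile.write(entry)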
@@ -206,19 +215,19 b' A "temporary file" entry exist for the s'
206 $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
215 $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
207 data/file.i 1174
216 data/file.i 1174
208 data/file.d 0
217 data/file.d 0
209 $ cat .hg/store/journal.backupfiles | tr -s '\000' ' ' | tr -s '\00' ' '| grep data/file
218 $ cat .hg/store/journal.backupfiles | tr -s '\000' ' ' | tr -s '\00' ' '| grep 'data.*/file'
210 data/file.i data/journal.backup.file.i.bck 0
219 data/file.i data/journal.backup.file.i.bck 0
211 data/file.i.s 0
220 data-s/file 0
212
221
213 recover is rolling the split back, the fncache is still valid
222 recover is rolling the split back, the fncache is still valid
214
223
215 $ hg recover
224 $ hg recover
216 rolling back interrupted transaction
225 rolling back interrupted transaction
217 (verify step skipped, run `hg verify` to check your repository content)
226 (verify step skipped, run `hg verify` to check your repository content)
218 $ f -s .hg/store/data/file*
227 $ f -s .hg/store/data*/file*
219 .hg/store/data/file.i: size=1174
228 .hg/store/data/file.i: size=1174
220 $ hg tip
229 $ hg tip
221 changeset: 1:cc8dfb126534
230 changeset: 1:64b04c8dc267
222 tag: tip
231 tag: tip
223 user: test
232 user: test
224 date: Thu Jan 01 00:00:00 1970 +0000
233 date: Thu Jan 01 00:00:00 1970 +0000
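`hg recover` replays the journal shown earlier: each entry pairs a store path with its pre-transaction size, so files grown by the transaction are truncated back and files it created are removed. A hedged simplification of the replay logic, not the code from mercurial/transaction.py:

    import os

    def playback_journal(store_root, entries):
        # entries: iterable of (path, size) pairs as recorded in the journal
        for name, size in entries:
            full = os.path.join(store_root, name)
            if size:
                with open(full, 'r+b') as fp:
                    fp.truncate(size)   # shrink back to pre-transaction size
            elif os.path.exists(full):
                os.unlink(full)         # created by the transaction: drop it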
@@ -243,7 +252,7 b' where the data file is left as garbage.'
243 Reference size:
252 Reference size:
244 $ f -s file
253 $ f -s file
245 file: size=1024
254 file: size=1024
246 $ f -s .hg/store/data/file*
255 $ f -s .hg/store/data*/file*
247 .hg/store/data/file.i: size=1174
256 .hg/store/data/file.i: size=1174
248
257
249 $ cat > .hg/hgrc <<EOF
258 $ cat > .hg/hgrc <<EOF
@@ -271,12 +280,12 b' Reference size:'
271
280
272 The inline revlog still exists, but a split version exists next to it
281 The inline revlog still exists, but a split version exists next to it
273
282
274 $ f -s .hg/store/data/file*
283 $ f -s .hg/store/data*/file*
275 .hg/store/data/file.d: size=132139
284 .hg/store/data-s/file: size=320
285 .hg/store/data/file.d: size=267307
276 .hg/store/data/file.i: size=132395
286 .hg/store/data/file.i: size=132395
277 .hg/store/data/file.i.s: size=256
278
287
279 $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
288 $ cat .hg/store/journal | tr -s '\000' ' ' | grep 'data.*/file'
280 data/file.i 1174
289 data/file.i 1174
281 data/file.d 0
290 data/file.d 0
282
291
@@ -285,10 +294,10 b' recover is rolling the split back, the f'
285 $ hg recover
294 $ hg recover
286 rolling back interrupted transaction
295 rolling back interrupted transaction
287 (verify step skipped, run `hg verify` to check your repository content)
296 (verify step skipped, run `hg verify` to check your repository content)
288 $ f -s .hg/store/data/file*
297 $ f -s .hg/store/data*/file*
289 .hg/store/data/file.i: size=1174
298 .hg/store/data/file.i: size=1174
290 $ hg tip
299 $ hg tip
291 changeset: 1:cc8dfb126534
300 changeset: 1:64b04c8dc267
292 tag: tip
301 tag: tip
293 user: test
302 user: test
294 date: Thu Jan 01 00:00:00 1970 +0000
303 date: Thu Jan 01 00:00:00 1970 +0000
@@ -308,7 +317,7 b' Now retry the procedure but intercept th'
308 Reference size:
317 Reference size:
309 $ f -s file
318 $ f -s file
310 file: size=1024
319 file: size=1024
311 $ f -s .hg/store/data/file*
320 $ f -s .hg/store/data*/file*
312 .hg/store/data/file.i: size=1174
321 .hg/store/data/file.i: size=1174
313
322
314 $ cat > .hg/hgrc <<EOF
323 $ cat > .hg/hgrc <<EOF
@@ -336,11 +345,11 b' Reference size:'
336
345
337 The inline revlog was overwritten on disk
346 The inline revlog was overwritten on disk
338
347
339 $ f -s .hg/store/data/file*
348 $ f -s .hg/store/data*/file*
340 .hg/store/data/file.d: size=132139
349 .hg/store/data/file.d: size=267307
341 .hg/store/data/file.i: size=256
350 .hg/store/data/file.i: size=320
342
351
343 $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
352 $ cat .hg/store/journal | tr -s '\000' ' ' | grep 'data.*/file'
344 data/file.i 1174
353 data/file.i 1174
345 data/file.d 0
354 data/file.d 0
346
355
@@ -349,10 +358,10 b' recover is rolling the split back, the f'
349 $ hg recover
358 $ hg recover
350 rolling back interrupted transaction
359 rolling back interrupted transaction
351 (verify step skipped, run `hg verify` to check your repository content)
360 (verify step skipped, run `hg verify` to check your repository content)
352 $ f -s .hg/store/data/file*
361 $ f -s .hg/store/data*/file*
353 .hg/store/data/file.i: size=1174
362 .hg/store/data/file.i: size=1174
354 $ hg tip
363 $ hg tip
355 changeset: 1:cc8dfb126534
364 changeset: 1:64b04c8dc267
356 tag: tip
365 tag: tip
357 user: test
366 user: test
358 date: Thu Jan 01 00:00:00 1970 +0000
367 date: Thu Jan 01 00:00:00 1970 +0000
@@ -386,13 +395,13 b' Repeat the original test but let hg roll'
386
395
387 The split was rolled back
396 The split was rolled back
388
397
389 $ f -s .hg/store/data/file*
398 $ f -s .hg/store/data*/file*
390 .hg/store/data/file.d: size=0
399 .hg/store/data/file.d: size=0
391 .hg/store/data/file.i: size=1174
400 .hg/store/data/file.i: size=1174
392
401
393
402
394 $ hg tip
403 $ hg tip
395 changeset: 1:cc8dfb126534
404 changeset: 1:64b04c8dc267
396 tag: tip
405 tag: tip
397 user: test
406 user: test
398 date: Thu Jan 01 00:00:00 1970 +0000
407 date: Thu Jan 01 00:00:00 1970 +0000
@@ -472,7 +481,7 b' We check that hooks properly see the in'
472 adding changesets
481 adding changesets
473 adding manifests
482 adding manifests
474 adding file changes
483 adding file changes
475 size=131072
484 size=135168
476 transaction abort!
485 transaction abort!
477 rollback completed
486 rollback completed
478 abort: pretxnclose.03-abort hook exited with status 1
487 abort: pretxnclose.03-abort hook exited with status 1
@@ -839,6 +839,7 b' store directory has files we expect'
839 00changelog.i
839 00changelog.i
840 00manifest.i
840 00manifest.i
841 data
841 data
842 data-s
842 fncache
843 fncache
843 phaseroots
844 phaseroots
844 requires
845 requires
@@ -862,6 +863,7 b' old store should be backed up'
862 00changelog.i
863 00changelog.i
863 00manifest.i
864 00manifest.i
864 data
865 data
866 data-s
865 fncache
867 fncache
866 phaseroots
868 phaseroots
867 requires
869 requires