branching: merge with stable
marmoute - r51579:a41eeb87 merge default
@@ -0,0 +1,51 b''
1 $ cat >> $HGRCPATH << EOF
2 > [storage]
3 > dirstate-v2.slow-path=allow
4 > [format]
5 > use-dirstate-v2=no
6 > EOF
7
8 Set up a v1 repo
9
10 $ hg init repo
11 $ cd repo
12 $ echo a > a
13 $ hg add a
14 $ hg commit -m a
15 $ hg debugrequires | grep dirstate
16 [1]
17 $ ls -1 .hg/dirstate*
18 .hg/dirstate
19
20 Copy v1 dirstate
21 $ cp .hg/dirstate $TESTTMP/dirstate-v1-backup
22
23 Upgrade it to v2
24
25 $ hg debugupgraderepo -q --config format.use-dirstate-v2=1 --run | egrep 'added:|removed:'
26 added: dirstate-v2
27 $ hg debugrequires | grep dirstate
28 dirstate-v2
29 $ ls -1 .hg/dirstate*
30 .hg/dirstate
31 .hg/dirstate.* (glob)
32
33 Manually reset to dirstate v1 to simulate an incomplete dirstate-v2 upgrade
34
35 $ rm .hg/dirstate*
36 $ cp $TESTTMP/dirstate-v1-backup .hg/dirstate
37
38 There should be no errors, but a v2 dirstate should be written back to disk
39 $ hg st
40 $ ls -1 .hg/dirstate*
41 .hg/dirstate
42 .hg/dirstate.* (glob)
43
44 Corrupt the dirstate to see how the errors show up to the user
45 $ echo "I ate your data" > .hg/dirstate
46
47 $ hg st
48 abort: working directory state appears damaged! (no-rhg !)
49 (falling back to dirstate-v1 from v2 also failed) (no-rhg !)
50 abort: Too little data for dirstate: 16 bytes. (rhg !)
51 [255]
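
The fallback behaviour exercised by this test reduces to a try/except around the v2 docket parse. A minimal runnable sketch of that shape, with hypothetical `read_v2_docket`/`read_dirstate` helpers and a made-up magic marker standing in for Mercurial's real code:

    class CorruptedDirstate(Exception):
        pass

    V2_MAGIC = b"dirstate-v2\n"  # made-up marker, for illustration only

    def read_v2_docket(data):
        if not data.startswith(V2_MAGIC):
            raise CorruptedDirstate("failed to read dirstate-v2 data")
        return ("v2", data[len(V2_MAGIC):])

    def read_dirstate(data):
        try:
            return read_v2_docket(data)  # try the expected format first
        except CorruptedDirstate:
            return ("v1", data)          # fall back; a v2 file is written back later

    assert read_dirstate(b"not a docket")[0] == "v1"
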
@@ -244,3 +244,4 b' f14864fffdcab725d9eac6d4f4c07be05a35f59a'
244 244 83ea6ce48b4fd09fb79c4e34cc5750c805699a53 0 iQHNBAABCgA3FiEEH2b4zfZU6QXBHaBhoR4BzQ4F2VYFAmQ3860ZHGFscGhhcmVAcmFwaGFlbGdvbWVzLmRldgAKCRChHgHNDgXZVk3gDACIIcQxKfis/r5UNj7SqyFhQxUCo8Njp7zdLFv3CSWFdFiOpQONI7Byt9KjwedUkUK9tqdb03V7W32ZSBTrNLM11uHY9E5Aknjoza4m+aIGbamEVRWIIHXjUZEMKS9QcY8ElbDvvPu/xdZjyTEjNNiuByUpPUcJXVzpKrHm8Wy3GWDliYBuu68mzFIX3JnZKscdK4EjCAfDysSwwfLeBMpd0Rk+SgwjDwyPWAAyU3yDPNmlUn8qTGHjXxU3vsHCXpoJWkfKmQ9n++23WEpM9vC8zx2TIy70+gFUvKG77+Ucv+djQxHRv0L6L5qUSBJukD3R3nml1xu6pUeioBHepRmTUWgPbHa/gQ+J2Pw+rPCK51x0EeT0SJjxUR2mmMLbk8N2efM35lEjF/sNxotTq17Sv9bjwXhue6BURxpQDEyOuSaS0IlF56ndXtE/4FX3H6zgU1+3jw5iBWajr1E04QjPlSOJO7nIKYM9Jq3VpHR7MiFwfT46pJEfw9pNgZX2b8o=
245 245 f952be90b0514a576dcc8bbe758ce3847faba9bb 0 iQHNBAABCgA3FiEEH2b4zfZU6QXBHaBhoR4BzQ4F2VYFAmQ+ZaoZHGFscGhhcmVAcmFwaGFlbGdvbWVzLmRldgAKCRChHgHNDgXZVuDOC/90SQ3UjXmByAaT5qr4bd3sVGt12lXlaKdyDxY0JMSKyHMUnb4YltHzNFxiUku10aRsRvJt5denTGeaOvAYbbXE7nbZJuyLD9rvfFTCe6EVx7kymCBwSbobKMzD79QHAFU7xu036gs7rmwyc++F4JF4IOrT4bjSYY5/8g0uLAHUexnn49QfQ5OYr325qShDFLjUZ7aH0yxA/gEr2MfXQmbIEc0eJJQXD1EhDkpSJFNIKzwWMOT1AhFk8kTlDqqbPnW7sDxTW+v/gGjAFYLHi8GMLEyrBQdEqytN7Pl9XOPXt/8RaDfIzYfl0OHxh2l1Y1MuH/PHrWO4PBPsr82QI2mxufYKuujpFMPr4PxXXl2g31OKhI8jJj+bHr62kGIOJCxZ8EPPGKXPGyoOuIVa0MeHmXxjb9kkj0SALjlaUvZrSENzRTsQXDNHQa+iDaITKLmItvLsaTEz9DJzGmI20shtJYcx4lqHsTgtMZfOtR5tmUknAFUUBZfUwvwULD4LmNI=
246 246 fc445f8abcf90b33db7c463816a1b3560681767f 0 iQHNBAABCgA3FiEEH2b4zfZU6QXBHaBhoR4BzQ4F2VYFAmRTok8ZHGFscGhhcmVAcmFwaGFlbGdvbWVzLmRldgAKCRChHgHNDgXZVpZ5DACBv33k//ovzSbyH5/q+Xhk3TqNRY8IDOjoEhvDyu0bJHsvygOGXLUtHpQPth1RA4/c+AVNJrUeFvT02sLqqP2d9oSA9HEAYpOuzwgr1A+1o+Q2GyfD4cElP6KfiEe8oyFVOB0rfBgWNei1C0nnrhChQr5dOPR63uAFhHzkEsgsTFS7ONxZ1DHbe7gRV8OMMf1MatAtRzRexQJCqyNv7WodQdrKtjHqPKtlWl20dbwTHhzeiZbtjiTe0CVXVsOqnA1DQkO/IaiKQrn3zWdGY5ABbqQ1K0ceLcej4NFOeLo9ZrShndU3BuFUa9Dq9bnPYOI9wMqGoDh/GdTZkZEzBy5PTokY3AJHblbub49pi8YTenFcPdtd/v71AaNi3TKa45ZNhYVkPmRETYweHkLs3CIrSyeiBwU4RGuQZVD/GujAQB5yhk0w+LPMzBsHruD4vsgXwIraCzQIIJTjgyxKuAJGdGNUFYyxEpUkgz5G6MFrBKe8HO69y3Pm/qDNZ2maV8k=
247 da372c745e0f053bb7a64e74cccd15810d96341d 0 iQHNBAABCgA3FiEEH2b4zfZU6QXBHaBhoR4BzQ4F2VYFAmSB7WkZHGFscGhhcmVAcmFwaGFlbGdvbWVzLmRldgAKCRChHgHNDgXZVoy+C/4zwO+Wxc3wr0aEzjVqAss7FuGS5e66H+0T3WzVgKIRMqiiOmUmmiNf+XloXlX4TOwoh9j9GNEpoZfV6TSwFSqV0LALaVIRRwrkJBDhnqw4eNBZbK5aBWNa2/21dkHecxF4KG3ai9kLwy2mtHxkDIy8T2LPvdx8pfNcYT4PZ19x2itqZLouBJqiZYehsqeMLNF2vRqkq+rQ+D2sFGLljgPo0JlpkOZ4IL7S/cqTOBG1sQ6KJK+hAE1kF1lhvK796VhKKXVnWVgqJLyg7ZI6168gxeFv5cyCtb+FUXJJ/5SOkxaCKJf3mg3DIYi3G7xjwB5CfUGW8A2qexgEjXeV42Mu7/Mkmn/aeTdL0UcRK3oBVHJwqt/fJlGFqVWt4/9g9KW5mJvTDQYBo/zjLyvKFEbnSLzhEP+9SvthCrtX0UYkKxOGi2M2Z7e9wgBB0gY8a36kA739lkNu6r3vH/FVh0aPTMWukLToELS90WgfViNr16lDnCeDjMgg97OKxWdOW6U=
@@ -260,3 +260,4 b' f14864fffdcab725d9eac6d4f4c07be05a35f59a'
260 260 83ea6ce48b4fd09fb79c4e34cc5750c805699a53 6.4.1
261 261 f952be90b0514a576dcc8bbe758ce3847faba9bb 6.4.2
262 262 fc445f8abcf90b33db7c463816a1b3560681767f 6.4.3
263 da372c745e0f053bb7a64e74cccd15810d96341d 6.4.4
@@ -79,8 +79,10 b' for most needs.'
79 79 Bundle files can be generated with the :hg:`bundle` command. Typically
80 80 :hg:`bundle --all` is used to produce a bundle of the entire repository.
81 81
82 :hg:`debugcreatestreamclonebundle` can be used to produce a special
83 *streaming clonebundle*. These are bundle files that are extremely efficient
82 The bundlespec option `stream` (see :hg:`help bundlespec`)
83 can be used to produce a special *streaming clonebundle*, typically using
84 :hg:`bundle --all --type="none-streamv2"`.
85 These are bundle files that are extremely efficient
84 86 to produce and consume (read: fast). However, they are larger than
85 87 traditional bundle formats and require that clients support the exact set
86 88 of repository data store formats in use by the repository that created them.
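
For context, such a bundle is advertised to clients via the repository's `.hg/clonebundles.manifest`, one URL plus attributes per line (the manifest format appears again in the tests below). An illustrative entry with a made-up URL:

    https://example.com/full-stream.hg BUNDLESPEC=none-streamv2
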
@@ -52,6 +52,14 b' def alter_bundle_url(repo, url):'
52 52 return url
53 53
54 54
55 SUPPORTED_CLONEBUNDLE_SCHEMES = [
56 b"http://",
57 b"https://",
58 b"largefile://",
59 CLONEBUNDLESCHEME,
60 ]
61
62
55 63 @attr.s
56 64 class bundlespec:
57 65 compression = attr.ib()
@@ -384,7 +392,9 b' def isstreamclonespec(bundlespec):'
384 392 return False
385 393
386 394
387 def filterclonebundleentries(repo, entries, streamclonerequested=False):
395 def filterclonebundleentries(
396 repo, entries, streamclonerequested=False, pullbundles=False
397 ):
388 398 """Remove incompatible clone bundle manifest entries.
389 399
390 400 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
@@ -396,6 +406,16 b' def filterclonebundleentries(repo, entri'
396 406 """
397 407 newentries = []
398 408 for entry in entries:
409 url = entry.get(b'URL')
410 if not pullbundles and not any(
411 [url.startswith(scheme) for scheme in SUPPORTED_CLONEBUNDLE_SCHEMES]
412 ):
413 repo.ui.debug(
414 b'filtering %s because not a supported clonebundle scheme\n'
415 % url
416 )
417 continue
418
399 419 spec = entry.get(b'BUNDLESPEC')
400 420 if spec:
401 421 try:
@@ -405,8 +425,7 b' def filterclonebundleentries(repo, entri'
405 425 # entries.
406 426 if streamclonerequested and not isstreamclonespec(bundlespec):
407 427 repo.ui.debug(
408 b'filtering %s because not a stream clone\n'
409 % entry[b'URL']
428 b'filtering %s because not a stream clone\n' % url
410 429 )
411 430 continue
412 431
@@ -416,7 +435,7 b' def filterclonebundleentries(repo, entri'
416 435 except error.UnsupportedBundleSpecification as e:
417 436 repo.ui.debug(
418 437 b'filtering %s because unsupported bundle '
419 b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
438 b'spec: %s\n' % (url, stringutil.forcebytestr(e))
420 439 )
421 440 continue
422 441 # If we don't have a spec and requested a stream clone, we don't know
@@ -424,14 +443,12 b' def filterclonebundleentries(repo, entri'
424 443 elif streamclonerequested:
425 444 repo.ui.debug(
426 445 b'filtering %s because cannot determine if a stream '
427 b'clone bundle\n' % entry[b'URL']
446 b'clone bundle\n' % url
428 447 )
429 448 continue
430 449
431 450 if b'REQUIRESNI' in entry and not sslutil.hassni:
432 repo.ui.debug(
433 b'filtering %s because SNI not supported\n' % entry[b'URL']
434 )
451 repo.ui.debug(b'filtering %s because SNI not supported\n' % url)
435 452 continue
436 453
437 454 if b'REQUIREDRAM' in entry:
@@ -439,15 +456,14 b' def filterclonebundleentries(repo, entri'
439 456 requiredram = util.sizetoint(entry[b'REQUIREDRAM'])
440 457 except error.ParseError:
441 458 repo.ui.debug(
442 b'filtering %s due to a bad REQUIREDRAM attribute\n'
443 % entry[b'URL']
459 b'filtering %s due to a bad REQUIREDRAM attribute\n' % url
444 460 )
445 461 continue
446 462 actualram = repo.ui.estimatememory()
447 463 if actualram is not None and actualram * 0.66 < requiredram:
448 464 repo.ui.debug(
449 465 b'filtering %s as it needs more than 2/3 of system memory\n'
450 % entry[b'URL']
466 % url
451 467 )
452 468 continue
453 469
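
The scheme check added in this hunk boils down to a prefix test against the allow-list; a standalone sketch of the same logic (`CLONEBUNDLESCHEME` left out for brevity; pull bundles skip the check because their manifest entries are resolved server-side rather than fetched by the client):

    SUPPORTED_CLONEBUNDLE_SCHEMES = [b"http://", b"https://", b"largefile://"]

    def keep_entry(url, pullbundles=False):
        if pullbundles:
            return True  # pull bundle entries are not fetched by the client
        return any(url.startswith(s) for s in SUPPORTED_CLONEBUNDLE_SCHEMES)

    assert keep_entry(b"https://example.com/bundle.hg")
    assert not keep_entry(b"weirdscheme://does.not.exist/bundle.hg")
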
@@ -4,6 +4,7 b''
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6
7 import struct
7 8 from .i18n import _
8 9
9 10 from . import (
@@ -151,9 +152,15 b' class _dirstatemapcommon:'
151 152 b'dirstate only has a docket in v2 format'
152 153 )
153 154 self._set_identity()
154 self._docket = docketmod.DirstateDocket.parse(
155 self._readdirstatefile(), self._nodeconstants
156 )
155 try:
156 self._docket = docketmod.DirstateDocket.parse(
157 self._readdirstatefile(), self._nodeconstants
158 )
159 except struct.error:
160 self._ui.debug(b"failed to read dirstate-v2 data")
161 raise error.CorruptedDirstate(
162 b"failed to read dirstate-v2 data"
163 )
157 164 return self._docket
158 165
159 166 def _read_v2_data(self):
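
Why `struct.error` is the exception caught here: unpacking a fixed-size docket header from data that is too short (for instance a v1 file, or the garbage written by the corruption test above) fails inside `struct.unpack`. A tiny illustration with a made-up header layout, not the real docket format:

    import struct

    try:
        struct.unpack(">12sBB", b"I ate your data"[:5])  # 5 bytes, 14 needed
    except struct.error as exc:
        print(exc)  # unpack requires a buffer of 14 bytes
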
@@ -176,11 +183,23 b' class _dirstatemapcommon:'
176 183 return self._opener.read(self.docket.data_filename())
177 184
178 185 def write_v2_no_append(self, tr, st, meta, packed):
179 old_docket = self.docket
186 try:
187 old_docket = self.docket
188 except error.CorruptedDirstate:
189 # This means we've identified a dirstate-v1 file on-disk when we
190 # were expecting a dirstate-v2 docket. We've managed to recover
191 # from that unexpected situation, and now we want to write back a
192 # dirstate-v2 file to make the on-disk situation right again.
193 #
194 # This shouldn't be triggered since `self.docket` is cached and
195 # we would have called parents() or read() first, but it's here
196 # just in case.
197 old_docket = None
198
180 199 new_docket = docketmod.DirstateDocket.with_new_uuid(
181 200 self.parents(), len(packed), meta
182 201 )
183 if old_docket.uuid == new_docket.uuid:
202 if old_docket is not None and old_docket.uuid == new_docket.uuid:
184 203 raise error.ProgrammingError(b'dirstate docket name collision')
185 204 data_filename = new_docket.data_filename()
186 205 self._opener.write(data_filename, packed)
@@ -194,7 +213,7 b' class _dirstatemapcommon:'
194 213 st.close()
195 214 # Remove the old data file after the new docket pointing to
196 215 # the new data file was written.
197 if old_docket.uuid:
216 if old_docket is not None and old_docket.uuid:
198 217 data_filename = old_docket.data_filename()
199 218 if tr is not None:
200 219 tr.addbackup(data_filename, location=b'plain')
@@ -211,28 +230,40 b' class _dirstatemapcommon:'
211 230 def parents(self):
212 231 if not self._parents:
213 232 if self._use_dirstate_v2:
214 self._parents = self.docket.parents
233 try:
234 self.docket
235 except error.CorruptedDirstate as e:
236 # fall back to dirstate-v1 if we fail to read v2
237 self._v1_parents(e)
238 else:
239 self._parents = self.docket.parents
215 240 else:
216 read_len = self._nodelen * 2
217 st = self._readdirstatefile(read_len)
218 l = len(st)
219 if l == read_len:
220 self._parents = (
221 st[: self._nodelen],
222 st[self._nodelen : 2 * self._nodelen],
223 )
224 elif l == 0:
225 self._parents = (
226 self._nodeconstants.nullid,
227 self._nodeconstants.nullid,
228 )
229 else:
230 raise error.Abort(
231 _(b'working directory state appears damaged!')
232 )
241 self._v1_parents()
233 242
234 243 return self._parents
235 244
245 def _v1_parents(self, from_v2_exception=None):
246 read_len = self._nodelen * 2
247 st = self._readdirstatefile(read_len)
248 l = len(st)
249 if l == read_len:
250 self._parents = (
251 st[: self._nodelen],
252 st[self._nodelen : 2 * self._nodelen],
253 )
254 elif l == 0:
255 self._parents = (
256 self._nodeconstants.nullid,
257 self._nodeconstants.nullid,
258 )
259 else:
260 hint = None
261 if from_v2_exception is not None:
262 hint = _(b"falling back to dirstate-v1 from v2 also failed")
263 raise error.Abort(
264 _(b'working directory state appears damaged!'), hint
265 )
266
236 267
237 268 class dirstatemap(_dirstatemapcommon):
238 269 """Map encapsulating the dirstate's contents.
@@ -330,11 +361,17 b' class dirstatemap(_dirstatemapcommon):'
330 361 def read(self):
331 362 testing.wait_on_cfg(self._ui, b'dirstate.pre-read-file')
332 363 if self._use_dirstate_v2:
333
334 if not self.docket.uuid:
335 return
336 testing.wait_on_cfg(self._ui, b'dirstate.post-docket-read-file')
337 st = self._read_v2_data()
364 try:
365 self.docket
366 except error.CorruptedDirstate:
367 # fall back to dirstate-v1 if we fail to read v2
368 self._set_identity()
369 st = self._readdirstatefile()
370 else:
371 if not self.docket.uuid:
372 return
373 testing.wait_on_cfg(self._ui, b'dirstate.post-docket-read-file')
374 st = self._read_v2_data()
338 375 else:
339 376 self._set_identity()
340 377 st = self._readdirstatefile()
@@ -365,10 +402,17 b' class dirstatemap(_dirstatemapcommon):'
365 402 #
366 403 # (we cannot decorate the function directly since it is in a C module)
367 404 if self._use_dirstate_v2:
368 p = self.docket.parents
369 meta = self.docket.tree_metadata
370 parse_dirstate = util.nogc(v2.parse_dirstate)
371 parse_dirstate(self._map, self.copymap, st, meta)
405 try:
406 self.docket
407 except error.CorruptedDirstate:
408 # fall back to dirstate-v1 if we fail to parse v2
409 parse_dirstate = util.nogc(parsers.parse_dirstate)
410 p = parse_dirstate(self._map, self.copymap, st)
411 else:
412 p = self.docket.parents
413 meta = self.docket.tree_metadata
414 parse_dirstate = util.nogc(v2.parse_dirstate)
415 parse_dirstate(self._map, self.copymap, st, meta)
372 416 else:
373 417 parse_dirstate = util.nogc(parsers.parse_dirstate)
374 418 p = parse_dirstate(self._map, self.copymap, st)
@@ -597,38 +641,37 b' if rustmod is not None:'
597 641
598 642 testing.wait_on_cfg(self._ui, b'dirstate.pre-read-file')
599 643 if self._use_dirstate_v2:
600 self.docket # load the data if needed
601 inode = (
602 self.identity.stat.st_ino
603 if self.identity is not None
604 and self.identity.stat is not None
605 else None
606 )
607 testing.wait_on_cfg(self._ui, b'dirstate.post-docket-read-file')
608 if not self.docket.uuid:
609 data = b''
610 self._map = rustmod.DirstateMap.new_empty()
644 try:
645 self.docket
646 except error.CorruptedDirstate as e:
647 # fall back to dirstate-v1 if we fail to read v2
648 parents = self._v1_map(e)
611 649 else:
612 data = self._read_v2_data()
613 self._map = rustmod.DirstateMap.new_v2(
614 data,
615 self.docket.data_size,
616 self.docket.tree_metadata,
617 self.docket.uuid,
618 inode,
650 parents = self.docket.parents
651 inode = (
652 self.identity.stat.st_ino
653 if self.identity is not None
654 and self.identity.stat is not None
655 else None
656 )
657 testing.wait_on_cfg(
658 self._ui, b'dirstate.post-docket-read-file'
619 659 )
620 parents = self.docket.parents
660 if not self.docket.uuid:
661 data = b''
662 self._map = rustmod.DirstateMap.new_empty()
663 else:
664 data = self._read_v2_data()
665 self._map = rustmod.DirstateMap.new_v2(
666 data,
667 self.docket.data_size,
668 self.docket.tree_metadata,
669 self.docket.uuid,
670 inode,
671 )
672 parents = self.docket.parents
621 673 else:
622 self._set_identity()
623 inode = (
624 self.identity.stat.st_ino
625 if self.identity is not None
626 and self.identity.stat is not None
627 else None
628 )
629 self._map, parents = rustmod.DirstateMap.new_v1(
630 self._readdirstatefile(), inode
631 )
674 parents = self._v1_map()
632 675
633 676 if parents and not self._dirtyparents:
634 677 self.setparents(*parents)
@@ -638,6 +681,23 b' if rustmod is not None:'
638 681 self.get = self._map.get
639 682 return self._map
640 683
684 def _v1_map(self, from_v2_exception=None):
685 self._set_identity()
686 inode = (
687 self.identity.stat.st_ino
688 if self.identity is not None and self.identity.stat is not None
689 else None
690 )
691 try:
692 self._map, parents = rustmod.DirstateMap.new_v1(
693 self._readdirstatefile(), inode
694 )
695 except OSError as e:
696 if from_v2_exception is not None:
697 raise e from from_v2_exception
698 raise
699 return parents
700
641 701 @property
642 702 def copymap(self):
643 703 return self._map.copymap()
@@ -696,9 +756,15 b' if rustmod is not None:'
696 756 self._dirtyparents = False
697 757 return
698 758
759 write_mode = self._write_mode
760 try:
761 docket = self.docket
762 except error.CorruptedDirstate:
763 # fall back to dirstate-v1 if we fail to parse v2
764 docket = None
765
699 766 # We can only append to an existing data file if there is one
700 write_mode = self._write_mode
701 if self.docket.uuid is None:
767 if docket is None or docket.uuid is None:
702 768 write_mode = WRITE_MODE_FORCE_NEW
703 769 packed, meta, append = self._map.write_v2(write_mode)
704 770 if append:
@@ -650,6 +650,13 b' class CorruptedState(Exception):'
650 650 __bytes__ = _tobytes
651 651
652 652
653 class CorruptedDirstate(Exception):
654 """error raised the dirstate appears corrupted on-disk. It may be due to
655 a dirstate version mismatch (i.e. expecting v2 and finding v1 on disk)."""
656
657 __bytes__ = _tobytes
658
659
653 660 class PeerTransportError(Abort):
654 661 """Transport-level I/O error when communicating with a peer repo."""
655 662
@@ -67,6 +67,10 b' The following bundle <compression> engin'
67 67
68 68 .. bundlecompressionmarker
69 69
70 The compression engines can be prepended with ``stream`` to create a streaming bundle.
71 These are bundles that are extremely efficient to produce and consume,
72 but do not have guaranteed compatibility with older clients.
73
70 74 Available Options
71 75 =================
72 76
@@ -89,7 +93,6 b' phases'
89 93 revbranchcache
90 94 Include the "tags-fnodes" cache inside the bundle.
91 95
92
93 96 tagsfnodescache
94 97 Include the "tags-fnodes" cache inside the bundle.
95 98
@@ -109,3 +112,10 b' Examples'
109 112
110 113 ``zstd-v1``
111 114 This errors because ``zstd`` is not supported for ``v1`` types.
115
116 ``none-streamv2``
117 Produce a ``v2`` streaming bundle with no compression.
118
119 ``zstd-v2;obsolescence=true;phases=true``
120 Produce a ``v2`` bundle with zstandard compression which includes
121 obsolescence markers and phases.
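
The `<compression>-<version>;<param>=<value>` shape used by these examples can be illustrated with a toy parser (this is not Mercurial's `parsebundlespec`, only the surface syntax):

    def parse_spec(spec):
        base, _, rest = spec.partition(";")
        compression, _, version = base.partition("-")
        params = dict(p.split("=", 1) for p in rest.split(";") if p)
        return compression, version, params

    print(parse_spec("zstd-v2;obsolescence=true;phases=true"))
    # ('zstd', 'v2', {'obsolescence': 'true', 'phases': 'true'})
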
@@ -615,8 +615,8 b' class revlog:'
615 615 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
616 616 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
617 617 entry_point = b'%s.i.a' % self.radix
618 elif self._try_split and self.opener.exists(b'%s.i.s' % self.radix):
619 entry_point = b'%s.i.s' % self.radix
618 elif self._try_split and self.opener.exists(self._split_index_file):
619 entry_point = self._split_index_file
620 620 else:
621 621 entry_point = b'%s.i' % self.radix
622 622
@@ -2125,6 +2125,22 b' class revlog:'
2125 2125 raise error.CensoredNodeError(self.display_id, node, text)
2126 2126 raise
2127 2127
2128 @property
2129 def _split_index_file(self):
2130 """the path where to expect the index of an ongoing splitting operation
2131
2132 The file will only exist if a splitting operation is in progress, but
2133 it is always expected at the same location."""
2134 parts = os.path.split(self.radix)
2135 if len(parts) > 1:
2136 # adds a '-s' suffix to the ``data/`` or ``meta/`` base
2137 head = parts[0] + b'-s'
2138 return os.path.join(head, *parts[1:])
2139 else:
2140 # the revlog is stored at the root of the store (changelog or
2141 # manifest), no risk of collision.
2142 return self.radix + b'.i.s'
2143
2128 2144 def _enforceinlinesize(self, tr, side_write=True):
2129 2145 """Check if the revlog is too big for inline and convert if so.
2130 2146
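
A worked example of the path computation above, for the depth-one case the tests in this series exercise (radix ``data/file``), which is where the new ``data-s/`` directory seen later comes from:

    import os

    radix = b'data/file'
    parts = os.path.split(radix)           # (b'data', b'file')
    head = parts[0] + b'-s'                # b'data-s'
    print(os.path.join(head, *parts[1:]))  # b'data-s/file'
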
@@ -2161,7 +2177,7 b' class revlog:'
2161 2177 # this code
2162 2178 if side_write:
2163 2179 old_index_file_path = self._indexfile
2164 new_index_file_path = self._indexfile + b'.s'
2180 new_index_file_path = self._split_index_file
2165 2181 opener = self.opener
2166 2182 weak_self = weakref.ref(self)
2167 2183
@@ -1087,10 +1087,17 b' class deltacomputer:'
1087 1087 ):
1088 1088 self.revlog = revlog
1089 1089 self._write_debug = write_debug
1090 self._debug_search = debug_search
1090 if write_debug is None:
1091 self._debug_search = False
1092 else:
1093 self._debug_search = debug_search
1091 1094 self._debug_info = debug_info
1092 1095 self._snapshot_cache = SnapshotCache()
1093 1096
1097 @property
1098 def _gather_debug(self):
1099 return self._write_debug is not None or self._debug_info is not None
1100
1094 1101 def buildtext(self, revinfo, fh):
1095 1102 """Builds a fulltext version of a revision
1096 1103
@@ -1136,7 +1143,6 b' class deltacomputer:'
1136 1143 def _builddeltainfo(self, revinfo, base, fh, target_rev=None):
1137 1144 # can we use the cached delta?
1138 1145 revlog = self.revlog
1139 debug_search = self._write_debug is not None and self._debug_search
1140 1146 chainbase = revlog.chainbase(base)
1141 1147 if revlog._generaldelta:
1142 1148 deltabase = base
@@ -1173,7 +1179,7 b' class deltacomputer:'
1173 1179 delta = revinfo.cachedelta[1]
1174 1180 if delta is None:
1175 1181 delta = self._builddeltadiff(base, revinfo, fh)
1176 if debug_search:
1182 if self._debug_search:
1177 1183 msg = b"DBG-DELTAS-SEARCH: uncompressed-delta-size=%d\n"
1178 1184 msg %= len(delta)
1179 1185 self._write_debug(msg)
@@ -1181,17 +1187,17 b' class deltacomputer:'
1181 1187 if revlog.upperboundcomp is not None and snapshotdepth:
1182 1188 lowestrealisticdeltalen = len(delta) // revlog.upperboundcomp
1183 1189 snapshotlimit = revinfo.textlen >> snapshotdepth
1184 if debug_search:
1190 if self._debug_search:
1185 1191 msg = b"DBG-DELTAS-SEARCH: projected-lower-size=%d\n"
1186 1192 msg %= lowestrealisticdeltalen
1187 1193 self._write_debug(msg)
1188 1194 if snapshotlimit < lowestrealisticdeltalen:
1189 if debug_search:
1195 if self._debug_search:
1190 1196 msg = b"DBG-DELTAS-SEARCH: DISCARDED (snapshot limit)\n"
1191 1197 self._write_debug(msg)
1192 1198 return None
1193 1199 if revlog.length(base) < lowestrealisticdeltalen:
1194 if debug_search:
1200 if self._debug_search:
1195 1201 msg = b"DBG-DELTAS-SEARCH: DISCARDED (prev size)\n"
1196 1202 self._write_debug(msg)
1197 1203 return None
@@ -1253,41 +1259,34 b' class deltacomputer:'
1253 1259 if target_rev is None:
1254 1260 target_rev = len(self.revlog)
1255 1261
1256 if not revinfo.textlen:
1257 return self._fullsnapshotinfo(fh, revinfo, target_rev)
1262 gather_debug = self._gather_debug
1263 cachedelta = revinfo.cachedelta
1264 revlog = self.revlog
1265 p1r = p2r = None
1258 1266
1259 1267 if excluded_bases is None:
1260 1268 excluded_bases = set()
1261 1269
1262 # no delta for flag processor revision (see "candelta" for why)
1263 # not calling candelta since only one revision needs test, also to
1264 # avoid overhead fetching flags again.
1265 if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
1266 return self._fullsnapshotinfo(fh, revinfo, target_rev)
1267
1268 gather_debug = (
1269 self._write_debug is not None or self._debug_info is not None
1270 )
1271 debug_search = self._write_debug is not None and self._debug_search
1272
1273 1270 if gather_debug:
1274 1271 start = util.timer()
1275
1276 # count the number of different delta we tried (for debug purpose)
1277 dbg_try_count = 0
1278 # count the number of "search round" we did. (for debug purpose)
1279 dbg_try_rounds = 0
1280 dbg_type = b'unknown'
1281
1282 cachedelta = revinfo.cachedelta
1283 p1 = revinfo.p1
1284 p2 = revinfo.p2
1285 revlog = self.revlog
1286
1287 deltainfo = None
1288 p1r, p2r = revlog.rev(p1), revlog.rev(p2)
1289
1290 if gather_debug:
1272 dbg = self._one_dbg_data()
1273 dbg['revision'] = target_rev
1274 target_revlog = b"UNKNOWN"
1275 target_type = self.revlog.target[0]
1276 target_key = self.revlog.target[1]
1277 if target_type == KIND_CHANGELOG:
1278 target_revlog = b'CHANGELOG:'
1279 elif target_type == KIND_MANIFESTLOG:
1280 target_revlog = b'MANIFESTLOG:'
1281 if target_key:
1282 target_revlog += b'%s:' % target_key
1283 elif target_type == KIND_FILELOG:
1284 target_revlog = b'FILELOG:'
1285 if target_key:
1286 target_revlog += b'%s:' % target_key
1287 dbg['target-revlog'] = target_revlog
1288 p1r = revlog.rev(revinfo.p1)
1289 p2r = revlog.rev(revinfo.p2)
1291 1290 if p1r != nullrev:
1292 1291 p1_chain_len = revlog._chaininfo(p1r)[0]
1293 1292 else:
@@ -1296,7 +1295,109 b' class deltacomputer:'
1296 1295 p2_chain_len = revlog._chaininfo(p2r)[0]
1297 1296 else:
1298 1297 p2_chain_len = -1
1299 if debug_search:
1298 dbg['p1-chain-len'] = p1_chain_len
1299 dbg['p2-chain-len'] = p2_chain_len
1300
1301 # 1) if the revision is empty, no amount of delta can beat it
1302 #
1303 # 2) no delta for flag processor revision (see "candelta" for why)
1304 # not calling candelta since only one revision needs test, also to
1305 # avoid overhead fetching flags again.
1306 if not revinfo.textlen or revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
1307 deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
1308 if gather_debug:
1309 end = util.timer()
1310 dbg['duration'] = end - start
1311 dbg[
1312 'delta-base'
1313 ] = deltainfo.base # pytype: disable=attribute-error
1314 dbg['search_round_count'] = 0
1315 dbg['using-cached-base'] = False
1316 dbg['delta_try_count'] = 0
1317 dbg['type'] = b"full"
1318 dbg['snapshot-depth'] = 0
1319 self._dbg_process_data(dbg)
1320 return deltainfo
1321
1322 deltainfo = None
1323
1324 # If this cached delta is to be forcibly reused, let us comply early.
1325 if (
1326 revlog._generaldelta
1327 and revinfo.cachedelta is not None
1328 and revinfo.cachedelta[2] == DELTA_BASE_REUSE_FORCE
1329 ):
1330 base = revinfo.cachedelta[0]
1331 if base == nullrev:
1332 dbg_type = b"full"
1333 deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
1334 if gather_debug:
1335 snapshotdepth = 0
1336 elif base not in excluded_bases:
1337 delta = revinfo.cachedelta[1]
1338 header, data = revlog.compress(delta)
1339 deltalen = len(header) + len(data)
1340 if gather_debug:
1341 offset = revlog.end(len(revlog) - 1)
1342 chainbase = revlog.chainbase(base)
1343 distance = deltalen + offset - revlog.start(chainbase)
1344 chainlen, compresseddeltalen = revlog._chaininfo(base)
1345 chainlen += 1
1346 compresseddeltalen += deltalen
1347 if base == p1r or base == p2r:
1348 dbg_type = b"delta"
1349 snapshotdepth = None
1350 elif not revlog.issnapshot(base):
1351 snapshotdepth = None
1352 else:
1353 dbg_type = b"snapshot"
1354 snapshotdepth = revlog.snapshotdepth(base) + 1
1355 else:
1356 distance = None
1357 chainbase = None
1358 chainlen = None
1359 compresseddeltalen = None
1360 snapshotdepth = None
1361 deltainfo = _deltainfo(
1362 distance=distance,
1363 deltalen=deltalen,
1364 data=(header, data),
1365 base=base,
1366 chainbase=chainbase,
1367 chainlen=chainlen,
1368 compresseddeltalen=compresseddeltalen,
1369 snapshotdepth=snapshotdepth,
1370 )
1371
1372 if deltainfo is not None:
1373 if gather_debug:
1374 end = util.timer()
1375 dbg['duration'] = end - start
1376 dbg[
1377 'delta-base'
1378 ] = deltainfo.base # pytype: disable=attribute-error
1379 dbg['search_round_count'] = 0
1380 dbg['using-cached-base'] = True
1381 dbg['delta_try_count'] = 0
1382 dbg['type'] = b"full"
1383 if snapshotdepth is None:
1384 dbg['snapshot-depth'] = 0
1385 else:
1386 dbg['snapshot-depth'] = snapshotdepth
1387 self._dbg_process_data(dbg)
1388 return deltainfo
1389
1390 # count the number of different delta we tried (for debug purpose)
1391 dbg_try_count = 0
1392 # count the number of "search round" we did. (for debug purpose)
1393 dbg_try_rounds = 0
1394 dbg_type = b'unknown'
1395
1396 if p1r is None:
1397 p1r = revlog.rev(revinfo.p1)
1398 p2r = revlog.rev(revinfo.p2)
1399
1400 if self._debug_search:
1300 1401 msg = b"DBG-DELTAS-SEARCH: SEARCH rev=%d\n"
1301 1402 msg %= target_rev
1302 1403 self._write_debug(msg)
@@ -1314,7 +1415,7 b' class deltacomputer:'
1314 1415 candidaterevs = next(groups)
1315 1416 while candidaterevs is not None:
1316 1417 dbg_try_rounds += 1
1317 if debug_search:
1418 if self._debug_search:
1318 1419 prev = None
1319 1420 if deltainfo is not None:
1320 1421 prev = deltainfo.base
@@ -1325,7 +1426,7 b' class deltacomputer:'
1325 1426 and cachedelta[0] in candidaterevs
1326 1427 ):
1327 1428 round_type = b"cached-delta"
1328 elif p1 in candidaterevs or p2 in candidaterevs:
1429 elif p1r in candidaterevs or p2r in candidaterevs:
1329 1430 round_type = b"parents"
1330 1431 elif prev is not None and all(c < prev for c in candidaterevs):
1331 1432 round_type = b"refine-down"
@@ -1338,7 +1439,7 b' class deltacomputer:'
1338 1439 self._write_debug(msg)
1339 1440 nominateddeltas = []
1340 1441 if deltainfo is not None:
1341 if debug_search:
1442 if self._debug_search:
1342 1443 msg = (
1343 1444 b"DBG-DELTAS-SEARCH: CONTENDER: rev=%d - length=%d\n"
1344 1445 )
@@ -1348,14 +1449,14 b' class deltacomputer:'
1348 1449 # challenge it against refined candidates
1349 1450 nominateddeltas.append(deltainfo)
1350 1451 for candidaterev in candidaterevs:
1351 if debug_search:
1452 if self._debug_search:
1352 1453 msg = b"DBG-DELTAS-SEARCH: CANDIDATE: rev=%d\n"
1353 1454 msg %= candidaterev
1354 1455 self._write_debug(msg)
1355 1456 candidate_type = None
1356 if candidaterev == p1:
1457 if candidaterev == p1r:
1357 1458 candidate_type = b"p1"
1358 elif candidaterev == p2:
1459 elif candidaterev == p2r:
1359 1460 candidate_type = b"p2"
1360 1461 elif self.revlog.issnapshot(candidaterev):
1361 1462 candidate_type = b"snapshot-%d"
@@ -1376,7 +1477,7 b' class deltacomputer:'
1376 1477
1377 1478 dbg_try_count += 1
1378 1479
1379 if debug_search:
1480 if self._debug_search:
1380 1481 delta_start = util.timer()
1381 1482 candidatedelta = self._builddeltainfo(
1382 1483 revinfo,
@@ -1384,23 +1485,23 b' class deltacomputer:'
1384 1485 fh,
1385 1486 target_rev=target_rev,
1386 1487 )
1387 if debug_search:
1488 if self._debug_search:
1388 1489 delta_end = util.timer()
1389 1490 msg = b"DBG-DELTAS-SEARCH: delta-search-time=%f\n"
1390 1491 msg %= delta_end - delta_start
1391 1492 self._write_debug(msg)
1392 1493 if candidatedelta is not None:
1393 1494 if is_good_delta_info(self.revlog, candidatedelta, revinfo):
1394 if debug_search:
1495 if self._debug_search:
1395 1496 msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (GOOD)\n"
1396 1497 msg %= candidatedelta.deltalen
1397 1498 self._write_debug(msg)
1398 1499 nominateddeltas.append(candidatedelta)
1399 elif debug_search:
1500 elif self._debug_search:
1400 1501 msg = b"DBG-DELTAS-SEARCH: DELTA: length=%d (BAD)\n"
1401 1502 msg %= candidatedelta.deltalen
1402 1503 self._write_debug(msg)
1403 elif debug_search:
1504 elif self._debug_search:
1404 1505 msg = b"DBG-DELTAS-SEARCH: NO-DELTA\n"
1405 1506 self._write_debug(msg)
1406 1507 if nominateddeltas:
@@ -1434,17 +1535,14 b' class deltacomputer:'
1434 1535 and dbg_try_count == 1
1435 1536 and deltainfo.base == cachedelta[0]
1436 1537 )
1437 dbg = {
1438 'duration': end - start,
1439 'revision': target_rev,
1440 'delta-base': deltainfo.base, # pytype: disable=attribute-error
1441 'search_round_count': dbg_try_rounds,
1442 'using-cached-base': used_cached,
1443 'delta_try_count': dbg_try_count,
1444 'type': dbg_type,
1445 'p1-chain-len': p1_chain_len,
1446 'p2-chain-len': p2_chain_len,
1447 }
1538 dbg['duration'] = end - start
1539 dbg[
1540 'delta-base'
1541 ] = deltainfo.base # pytype: disable=attribute-error
1542 dbg['search_round_count'] = dbg_try_rounds
1543 dbg['using-cached-base'] = used_cached
1544 dbg['delta_try_count'] = dbg_try_count
1545 dbg['type'] = dbg_type
1448 1546 if (
1449 1547 deltainfo.snapshotdepth # pytype: disable=attribute-error
1450 1548 is not None
@@ -1454,55 +1552,58 b' class deltacomputer:'
1454 1552 ] = deltainfo.snapshotdepth # pytype: disable=attribute-error
1455 1553 else:
1456 1554 dbg['snapshot-depth'] = 0
1457 target_revlog = b"UNKNOWN"
1458 target_type = self.revlog.target[0]
1459 target_key = self.revlog.target[1]
1460 if target_type == KIND_CHANGELOG:
1461 target_revlog = b'CHANGELOG:'
1462 elif target_type == KIND_MANIFESTLOG:
1463 target_revlog = b'MANIFESTLOG:'
1464 if target_key:
1465 target_revlog += b'%s:' % target_key
1466 elif target_type == KIND_FILELOG:
1467 target_revlog = b'FILELOG:'
1468 if target_key:
1469 target_revlog += b'%s:' % target_key
1470 dbg['target-revlog'] = target_revlog
1555 self._dbg_process_data(dbg)
1556 return deltainfo
1471 1557
1472 if self._debug_info is not None:
1473 self._debug_info.append(dbg)
1558 def _one_dbg_data(self):
1559 return {
1560 'duration': None,
1561 'revision': None,
1562 'delta-base': None,
1563 'search_round_count': None,
1564 'using-cached-base': None,
1565 'delta_try_count': None,
1566 'type': None,
1567 'p1-chain-len': None,
1568 'p2-chain-len': None,
1569 'snapshot-depth': None,
1570 'target-revlog': None,
1571 }
1572
1573 def _dbg_process_data(self, dbg):
1574 if self._debug_info is not None:
1575 self._debug_info.append(dbg)
1474 1576
1475 if self._write_debug is not None:
1476 msg = (
1477 b"DBG-DELTAS:"
1478 b" %-12s"
1479 b" rev=%d:"
1480 b" delta-base=%d"
1481 b" is-cached=%d"
1482 b" - search-rounds=%d"
1483 b" try-count=%d"
1484 b" - delta-type=%-6s"
1485 b" snap-depth=%d"
1486 b" - p1-chain-length=%d"
1487 b" p2-chain-length=%d"
1488 b" - duration=%f"
1489 b"\n"
1490 )
1491 msg %= (
1492 dbg["target-revlog"],
1493 dbg["revision"],
1494 dbg["delta-base"],
1495 dbg["using-cached-base"],
1496 dbg["search_round_count"],
1497 dbg["delta_try_count"],
1498 dbg["type"],
1499 dbg["snapshot-depth"],
1500 dbg["p1-chain-len"],
1501 dbg["p2-chain-len"],
1502 dbg["duration"],
1503 )
1504 self._write_debug(msg)
1505 return deltainfo
1577 if self._write_debug is not None:
1578 msg = (
1579 b"DBG-DELTAS:"
1580 b" %-12s"
1581 b" rev=%d:"
1582 b" delta-base=%d"
1583 b" is-cached=%d"
1584 b" - search-rounds=%d"
1585 b" try-count=%d"
1586 b" - delta-type=%-6s"
1587 b" snap-depth=%d"
1588 b" - p1-chain-length=%d"
1589 b" p2-chain-length=%d"
1590 b" - duration=%f"
1591 b"\n"
1592 )
1593 msg %= (
1594 dbg["target-revlog"],
1595 dbg["revision"],
1596 dbg["delta-base"],
1597 dbg["using-cached-base"],
1598 dbg["search_round_count"],
1599 dbg["delta_try_count"],
1600 dbg["type"],
1601 dbg["snapshot-depth"],
1602 dbg["p1-chain-len"],
1603 dbg["p2-chain-len"],
1604 dbg["duration"],
1605 )
1606 self._write_debug(msg)
1506 1607
1507 1608
1508 1609 def delta_compression(default_compression_header, deltainfo):
@@ -50,8 +50,8 b' templatefunc = registrar.templatefunc(fu'
50 50
51 51 @templatefunc(b'date(date[, fmt])')
52 52 def date(context, mapping, args):
53 """Format a date. See :hg:`help dates` for formatting
54 strings. The default is a Unix date format, including the timezone:
53 """Format a date. The format string uses the Python strftime format.
54 The default is a Unix date format, including the timezone:
55 55 "Mon Sep 04 15:13:13 2006 0700"."""
56 56 if not (1 <= len(args) <= 2):
57 57 # i18n: "date" is a keyword
@@ -290,6 +290,8 b' class transaction(util.transactional):'
290 290 self._backupjournal = b"%s.backupfiles" % self._journal
291 291 self._backupsfile = opener.open(self._backupjournal, b'w')
292 292 self._backupsfile.write(b'%d\n' % version)
293 # the set of temporary files
294 self._tmp_files = set()
293 295
294 296 if createmode is not None:
295 297 opener.chmod(self._journal, createmode & 0o666)
@@ -354,6 +356,7 b' class transaction(util.transactional):'
354 356 file in self._newfiles
355 357 or file in self._offsetmap
356 358 or file in self._backupmap
359 or file in self._tmp_files
357 360 ):
358 361 return
359 362 if self._queue:
@@ -368,6 +371,7 b' class transaction(util.transactional):'
368 371 file in self._newfiles
369 372 or file in self._offsetmap
370 373 or file in self._backupmap
374 or file in self._tmp_files
371 375 ):
372 376 return
373 377 if offset:
@@ -439,6 +443,7 b' class transaction(util.transactional):'
439 443 Such files will be deleted when the transaction exits (on both
440 444 failure and success).
441 445 """
446 self._tmp_files.add(tmpfile)
442 447 self._addbackupentry((location, b'', tmpfile, False))
443 448
444 449 @active
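
The new `_tmp_files` set short-circuits later registrations: a file recorded as temporary is only scheduled for deletion, never restored. In miniature (a hypothetical standalone shape, not the real transaction class):

    class TinyTransaction:
        def __init__(self):
            self._tmp_files = set()
            self._backups = []

        def registertmp(self, path):
            self._tmp_files.add(path)  # delete on transaction exit

        def addbackup(self, path):
            if path in self._tmp_files:
                return  # already handled as temporary; do not back it up
            self._backups.append(path)

    tr = TinyTransaction()
    tr.registertmp(b'data-s/file')
    tr.addbackup(b'data-s/file')
    assert tr._backups == []
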
@@ -437,7 +437,7 b' def find_pullbundle(repo, proto, opts, c'
437 437 if not manifest:
438 438 return None
439 439 res = bundlecaches.parseclonebundlesmanifest(repo, manifest)
440 res = bundlecaches.filterclonebundleentries(repo, res)
440 res = bundlecaches.filterclonebundleentries(repo, res, pullbundles=True)
441 441 if not res:
442 442 return None
443 443 cl = repo.unfiltered().changelog
@@ -1,3 +1,14 b''
1 = Mercurial 6.4.4 =
2
3 * clonebundles: filter out invalid schemes instead of failing on them
4 * doc: format argument for date uses strftime format string (issue6818)
5 * test: make test-contrib-perf.t more robust
6 * revlog: fix a bug in revlog splitting
7 * bundles: clarify streaming v2 bundle usage
8 * delta-find: fix pulled-delta-reuse-policy=forced behavior
9 * dirstate: fall back to v1 if reading v2 failed
10 * revlog: avoid possible collision between directory and temporary index
11
1 12 = Mercurial 6.4.3 =
2 13
3 14 * chg: declare environ (issue6812)
@@ -232,7 +232,17 b' impl Repo {'
232 232 try_with_lock_no_wait(self.hg_vfs(), "wlock", f)
233 233 }
234 234
235 pub fn has_dirstate_v2(&self) -> bool {
235 /// Whether this repo should use dirstate-v2.
236 /// The presence of `dirstate-v2` in the requirements does not mean that
237 /// the on-disk dirstate is necessarily in version 2. In most cases,
238 /// a dirstate-v2 file will indeed be found, but in rare cases (like the
239 /// upgrade mechanism being cut short), the on-disk version will be a
240 /// v1 file.
241 /// Semantically, having a requirement only means that a client cannot
242 /// properly understand or properly update the repo if it lacks the support
243 /// for the required feature, but not that that feature is actually used
244 /// in all cases.
245 pub fn use_dirstate_v2(&self) -> bool {
236 246 self.requirements
237 247 .contains(requirements::DIRSTATE_V2_REQUIREMENT)
238 248 }
@@ -277,10 +287,21 b' impl Repo {'
277 287 let dirstate = self.dirstate_file_contents()?;
278 288 let parents = if dirstate.is_empty() {
279 289 DirstateParents::NULL
280 } else if self.has_dirstate_v2() {
281 let docket =
282 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
283 docket.parents()
290 } else if self.use_dirstate_v2() {
291 let docket_res =
292 crate::dirstate_tree::on_disk::read_docket(&dirstate);
293 match docket_res {
294 Ok(docket) => docket.parents(),
295 Err(_) => {
296 log::info!(
297 "Parsing dirstate docket failed, \
298 falling back to dirstate-v1"
299 );
300 *crate::dirstate::parsers::parse_dirstate_parents(
301 &dirstate,
302 )?
303 }
304 }
284 305 } else {
285 306 *crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
286 307 };
@@ -296,7 +317,7 b' impl Repo {'
296 317 &self,
297 318 ) -> Result<DirstateMapIdentity, HgError> {
298 319 assert!(
299 self.has_dirstate_v2(),
320 self.use_dirstate_v2(),
300 321 "accessing dirstate data file ID without dirstate-v2"
301 322 );
302 323 // Get the identity before the contents since we could have a race
@@ -308,15 +329,35 b' impl Repo {'
308 329 self.dirstate_parents.set(DirstateParents::NULL);
309 330 Ok((identity, None, 0))
310 331 } else {
311 let docket =
312 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
313 self.dirstate_parents.set(docket.parents());
314 Ok((identity, Some(docket.uuid.to_owned()), docket.data_size()))
332 let docket_res =
333 crate::dirstate_tree::on_disk::read_docket(&dirstate);
334 match docket_res {
335 Ok(docket) => {
336 self.dirstate_parents.set(docket.parents());
337 Ok((
338 identity,
339 Some(docket.uuid.to_owned()),
340 docket.data_size(),
341 ))
342 }
343 Err(_) => {
344 log::info!(
345 "Parsing dirstate docket failed, \
346 falling back to dirstate-v1"
347 );
348 let parents =
349 *crate::dirstate::parsers::parse_dirstate_parents(
350 &dirstate,
351 )?;
352 self.dirstate_parents.set(parents);
353 Ok((identity, None, 0))
354 }
355 }
315 356 }
316 357 }
317 358
318 359 fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
319 if self.has_dirstate_v2() {
360 if self.use_dirstate_v2() {
320 361 // The v2 dirstate is split into a docket and a data file.
321 362 // Since we don't always take the `wlock` to read it
322 363 // (like in `hg status`), it is susceptible to races.
@@ -343,7 +384,13 b' impl Repo {'
343 384 );
344 385 continue;
345 386 }
346 _ => return Err(e),
387 _ => {
388 log::info!(
389 "Reading dirstate v2 failed, \
390 falling back to v1"
391 );
392 return self.new_dirstate_map_v1();
393 }
347 394 },
348 395 }
349 396 }
@@ -354,23 +401,22 b' impl Repo {'
354 401 );
355 402 Err(DirstateError::Common(error))
356 403 } else {
357 debug_wait_for_file_or_print(
358 self.config(),
359 "dirstate.pre-read-file",
360 );
361 let identity = self.dirstate_identity()?;
362 let dirstate_file_contents = self.dirstate_file_contents()?;
363 if dirstate_file_contents.is_empty() {
364 self.dirstate_parents.set(DirstateParents::NULL);
365 Ok(OwningDirstateMap::new_empty(Vec::new()))
366 } else {
367 let (map, parents) = OwningDirstateMap::new_v1(
368 dirstate_file_contents,
369 identity,
370 )?;
371 self.dirstate_parents.set(parents);
372 Ok(map)
373 }
404 self.new_dirstate_map_v1()
405 }
406 }
407
408 fn new_dirstate_map_v1(&self) -> Result<OwningDirstateMap, DirstateError> {
409 debug_wait_for_file_or_print(self.config(), "dirstate.pre-read-file");
410 let identity = self.dirstate_identity()?;
411 let dirstate_file_contents = self.dirstate_file_contents()?;
412 if dirstate_file_contents.is_empty() {
413 self.dirstate_parents.set(DirstateParents::NULL);
414 Ok(OwningDirstateMap::new_empty(Vec::new()))
415 } else {
416 let (map, parents) =
417 OwningDirstateMap::new_v1(dirstate_file_contents, identity)?;
418 self.dirstate_parents.set(parents);
419 Ok(map)
374 420 }
375 421 }
376 422
@@ -550,7 +596,7 b' impl Repo {'
550 596 // TODO: Maintain a `DirstateMap::dirty` flag, and return early here if
551 597 // it’s unset
552 598 let parents = self.dirstate_parents()?;
553 let (packed_dirstate, old_uuid_to_remove) = if self.has_dirstate_v2() {
599 let (packed_dirstate, old_uuid_to_remove) = if self.use_dirstate_v2() {
554 600 let (identity, uuid, data_size) =
555 601 self.get_dirstate_data_file_integrity()?;
556 602 let identity_changed = identity != map.old_identity();
@@ -59,6 +59,20 b' Manifest file with invalid URL aborts'
59 59 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
60 60 [255]
61 61
62 Manifest file with URL with unknown scheme skips the URL
63 $ echo 'weirdscheme://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
64 $ hg clone http://localhost:$HGPORT unknown-scheme
65 no compatible clone bundles available on server; falling back to regular clone
66 (you may want to report this to the server operator)
67 requesting all changes
68 adding changesets
69 adding manifests
70 adding file changes
71 added 2 changesets with 2 changes to 2 files
72 new changesets 53245c60e682:aaff8d2ffbbf
73 updating to branch default
74 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
75
62 76 Server is not running aborts
63 77
64 78 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
@@ -304,20 +304,20 b' Simple single entry'
304 304
305 305 Multiple entries
306 306
307 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
308 ! wall * comb * user * sys * (best of 5) (glob)
307 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-50'
308 ! wall * comb * user * sys * (best of 50) (glob)
309 309
310 310 error case are ignored
311 311
312 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
312 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-50'
313 313 malformatted run limit entry, missing "-": 500
314 ! wall * comb * user * sys * (best of 5) (glob)
315 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
316 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12
317 ! wall * comb * user * sys * (best of 5) (glob)
318 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
319 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
320 ! wall * comb * user * sys * (best of 5) (glob)
314 ! wall * comb * user * sys * (best of 50) (glob)
315 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-120, 0.000000001-50'
316 malformatted run limit entry, could not convert string to float: 'aaa': aaa-120
317 ! wall * comb * user * sys * (best of 50) (glob)
318 $ hg perfparents --config perf.stub=no --config perf.run-limits='120-aaaaaa, 0.000000001-50'
319 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 120-aaaaaa
320 ! wall * comb * user * sys * (best of 50) (glob)
321 321
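
The `perf.run-limits` value is a comma-separated list of `<max-time>-<min-run-count>` pairs. A toy parser showing why the malformed entries above are rejected with those exact messages (not contrib/perf.py's actual code):

    def parse_run_limits(spec):
        limits = []
        for item in spec.split(","):
            item = item.strip()
            parts = item.split("-", 1)
            if len(parts) < 2:
                print('malformatted run limit entry, missing "-": %s' % item)
                continue
            try:
                limits.append((float(parts[0]), int(parts[1])))
            except ValueError as exc:
                print("malformatted run limit entry, %s: %s" % (exc, item))
        return limits

    parse_run_limits("500000-1, 0.000000001-50")  # [(500000.0, 1), (1e-09, 50)]
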
322 322 test actual output
323 323 ------------------
@@ -329,5 +329,34 b' more subtle to test this behavior.'
329 329 DBG-DELTAS: CHANGELOG: * (glob)
330 330 DBG-DELTAS: MANIFESTLOG: * (glob)
331 331 DBG-DELTAS: MANIFESTLOG: * (glob)
332 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
333 DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=3 * (glob)
332 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 is-cached=1 *search-rounds=0 try-count=0* (glob)
333 DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=3 is-cached=1 *search-rounds=0 try-count=0* (glob)
334
335 Check that running "forced" on a non-general delta repository does not corrupt it
336 ---------------------------------------------------------------------------------
337
338 Even if requested, some of the deltas in the revlog cannot be stored in a non-general-delta repository. We check that the bundle application was correct.
339
340 $ hg init \
341 > --config format.usegeneraldelta=no \
342 > --config format.sparse-revlog=no \
343 > local-forced-full-p1-no-gd
344 $ hg debugformat -R local-forced-full-p1-no-gd | grep generaldelta
345 generaldelta: no
346 $ hg -R local-forced-full-p1-no-gd pull --quiet local-pre-pull-full \
347 > --config debug.revlog.debug-delta=no
348 $ hg -R local-forced-full-p1-no-gd pull --quiet \
349 > --config 'paths.*:pulled-delta-reuse-policy=forced' all-p1.hg
350 DBG-DELTAS: CHANGELOG: * (glob)
351 DBG-DELTAS: CHANGELOG: * (glob)
352 DBG-DELTAS: MANIFESTLOG: * (glob)
353 DBG-DELTAS: MANIFESTLOG: * (glob)
354 DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=0 * - search-rounds=1 try-count=1 * (glob)
355 DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=4 * - search-rounds=1 try-count=1 * (glob)
356 $ hg -R local-forced-full-p1-no-gd verify
357 checking changesets
358 checking manifests
359 crosschecking files in changesets and manifests
360 checking files
361 checking dirstate
362 checked 5 changesets with 5 changes to 1 files
@@ -84,6 +84,8 b' setup a repository for tests'
84 84 > Directory_With,Special%Char/Complex_File.babar
85 85 > foo/bar/babar_celeste/foo
86 86 > 1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/f
87 > some_dir/sub_dir/foo_bar
88 > some_dir/sub_dir/foo_bar.i.s/tutu
87 89 > "
88 90 $ for f in $files; do
89 91 > mkdir -p `dirname $f`
@@ -104,13 +106,17 b' setup a repository for tests'
104 106 > dd if=/dev/zero of=$f bs=1k count=128 > /dev/null 2>&1
105 107 > done
106 108 $ hg commit -AqmD --traceback
109 $ for f in $files; do
110 > dd if=/dev/zero of=$f bs=1k count=132 > /dev/null 2>&1
111 > done
112 $ hg commit -AqmD --traceback
107 113
108 114 Reference size:
109 115 $ f -s file
110 file: size=131072
111 $ f -s .hg/store/data/file*
112 .hg/store/data/file.d: size=132139
113 .hg/store/data/file.i: size=256
116 file: size=135168
117 $ f -s .hg/store/data*/file*
118 .hg/store/data/file.d: size=267307
119 .hg/store/data/file.i: size=320
114 120
115 121 $ cd ..
116 122
@@ -134,16 +140,16 b' Reference size:'
134 140 adding changesets
135 141 adding manifests
136 142 adding file changes
137 added 2 changesets with 8 changes to 4 files
138 new changesets 16a630ece54e:8437c461d70a
143 added 3 changesets with 18 changes to 6 files
144 new changesets c99a94cae9b1:64874a3b0160
139 145 (run 'hg update' to get a working copy)
140 146
141 147
142 148 The inline revlog has been replaced
143 149
144 150 $ f -s .hg/store/data/file*
145 .hg/store/data/file.d: size=132139
146 .hg/store/data/file.i: size=256
151 .hg/store/data/file.d: size=267307
152 .hg/store/data/file.i: size=320
147 153
148 154
149 155 $ hg verify -q
@@ -171,7 +177,7 b' but truncate the index and the data to r'
171 177 Reference size:
172 178 $ f -s file
173 179 file: size=1024
174 $ f -s .hg/store/data/file*
180 $ f -s .hg/store/data*/file*
175 181 .hg/store/data/file.i: size=1174
176 182
177 183 $ cat > .hg/hgrc <<EOF
@@ -192,10 +198,13 b' Reference size:'
192 198
193 199 The inline revlog still exist, but a split version exist next to it
194 200
195 $ f -s .hg/store/data/file*
196 .hg/store/data/file.d: size=132139
201 $ cat .hg/store/journal | tr '\0' ' ' | grep '\.s'
202 data/some_dir/sub_dir/foo_bar.i.s/tutu.i 1174
203 data/some_dir/sub_dir/foo_bar.i.s/tutu.d 0
204 $ f -s .hg/store/data*/file*
205 .hg/store/data-s/file: size=320
206 .hg/store/data/file.d: size=267307
197 207 .hg/store/data/file.i: size=132395
198 .hg/store/data/file.i.s: size=256
199 208
200 209
201 210 The first file.i entry should match the "Reference size" above.
@@ -206,19 +215,19 b' A "temporary file" entry exist for the s'
206 215 $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
207 216 data/file.i 1174
208 217 data/file.d 0
209 $ cat .hg/store/journal.backupfiles | tr -s '\000' ' ' | tr -s '\00' ' '| grep data/file
218 $ cat .hg/store/journal.backupfiles | tr -s '\000' ' ' | tr -s '\00' ' '| grep 'data.*/file'
210 219 data/file.i data/journal.backup.file.i.bck 0
211 data/file.i.s 0
220 data-s/file 0
212 221
213 222 recover is rolling the split back, the fncache is still valid
214 223
215 224 $ hg recover
216 225 rolling back interrupted transaction
217 226 (verify step skipped, run `hg verify` to check your repository content)
218 $ f -s .hg/store/data/file*
227 $ f -s .hg/store/data*/file*
219 228 .hg/store/data/file.i: size=1174
220 229 $ hg tip
221 changeset: 1:cc8dfb126534
230 changeset: 1:64b04c8dc267
222 231 tag: tip
223 232 user: test
224 233 date: Thu Jan 01 00:00:00 1970 +0000
@@ -243,7 +252,7 b' where the data file is left as garbage.'
243 252 Reference size:
244 253 $ f -s file
245 254 file: size=1024
246 $ f -s .hg/store/data/file*
255 $ f -s .hg/store/data*/file*
247 256 .hg/store/data/file.i: size=1174
248 257
249 258 $ cat > .hg/hgrc <<EOF
@@ -271,12 +280,12 b' Reference size:'
271 280
272 281 The inline revlog still exist, but a split version exist next to it
273 282
274 $ f -s .hg/store/data/file*
275 .hg/store/data/file.d: size=132139
283 $ f -s .hg/store/data*/file*
284 .hg/store/data-s/file: size=320
285 .hg/store/data/file.d: size=267307
276 286 .hg/store/data/file.i: size=132395
277 .hg/store/data/file.i.s: size=256
278 287
279 $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
288 $ cat .hg/store/journal | tr -s '\000' ' ' | grep 'data.*/file'
280 289 data/file.i 1174
281 290 data/file.d 0
282 291
@@ -285,10 +294,10 b' recover is rolling the split back, the f'
285 294 $ hg recover
286 295 rolling back interrupted transaction
287 296 (verify step skipped, run `hg verify` to check your repository content)
288 $ f -s .hg/store/data/file*
297 $ f -s .hg/store/data*/file*
289 298 .hg/store/data/file.i: size=1174
290 299 $ hg tip
291 changeset: 1:cc8dfb126534
300 changeset: 1:64b04c8dc267
292 301 tag: tip
293 302 user: test
294 303 date: Thu Jan 01 00:00:00 1970 +0000
@@ -308,7 +317,7 b' Now retry the procedure but intercept th'
308 317 Reference size:
309 318 $ f -s file
310 319 file: size=1024
311 $ f -s .hg/store/data/file*
320 $ f -s .hg/store/data*/file*
312 321 .hg/store/data/file.i: size=1174
313 322
314 323 $ cat > .hg/hgrc <<EOF
@@ -336,11 +345,11 b' Reference size:'
336 345
337 346 The inline revlog was over written on disk
338 347
339 $ f -s .hg/store/data/file*
340 .hg/store/data/file.d: size=132139
341 .hg/store/data/file.i: size=256
348 $ f -s .hg/store/data*/file*
349 .hg/store/data/file.d: size=267307
350 .hg/store/data/file.i: size=320
342 351
343 $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
352 $ cat .hg/store/journal | tr -s '\000' ' ' | grep 'data.*/file'
344 353 data/file.i 1174
345 354 data/file.d 0
346 355
@@ -349,10 +358,10 b' recover is rolling the split back, the f'
349 358 $ hg recover
350 359 rolling back interrupted transaction
351 360 (verify step skipped, run `hg verify` to check your repository content)
352 $ f -s .hg/store/data/file*
361 $ f -s .hg/store/data*/file*
353 362 .hg/store/data/file.i: size=1174
354 363 $ hg tip
355 changeset: 1:cc8dfb126534
364 changeset: 1:64b04c8dc267
356 365 tag: tip
357 366 user: test
358 367 date: Thu Jan 01 00:00:00 1970 +0000
@@ -386,13 +395,13 b' Repeat the original test but let hg roll'
386 395
387 396 The split was rollback
388 397
389 $ f -s .hg/store/data/file*
398 $ f -s .hg/store/data*/file*
390 399 .hg/store/data/file.d: size=0
391 400 .hg/store/data/file.i: size=1174
392 401
393 402
394 403 $ hg tip
395 changeset: 1:cc8dfb126534
404 changeset: 1:64b04c8dc267
396 405 tag: tip
397 406 user: test
398 407 date: Thu Jan 01 00:00:00 1970 +0000
@@ -472,7 +481,7 b' We checks that hooks properly see the in'
472 481 adding changesets
473 482 adding manifests
474 483 adding file changes
475 size=131072
484 size=135168
476 485 transaction abort!
477 486 rollback completed
478 487 abort: pretxnclose.03-abort hook exited with status 1
@@ -839,6 +839,7 b' store directory has files we expect'
839 839 00changelog.i
840 840 00manifest.i
841 841 data
842 data-s
842 843 fncache
843 844 phaseroots
844 845 requires
@@ -862,6 +863,7 b' old store should be backed up'
862 863 00changelog.i
863 864 00manifest.i
864 865 data
866 data-s
865 867 fncache
866 868 phaseroots
867 869 requires