rust-index: drop offset_override...
marmoute
r52338:d2858d97 default
@@ -18,11 +18,12 @@ use crate::{
 };
 
 pub const INDEX_ENTRY_SIZE: usize = 64;
+pub const INDEX_HEADER_SIZE: usize = 4;
 pub const COMPRESSION_MODE_INLINE: u8 = 2;
 
 #[derive(Debug)]
 pub struct IndexHeader {
-    pub(super) header_bytes: [u8; 4],
+    pub(super) header_bytes: [u8; INDEX_HEADER_SIZE],
 }
 
 #[derive(Copy, Clone)]
@@ -92,14 +93,21 @@ struct IndexData {
     truncation: Option<usize>,
     /// Bytes that were added after reading the index
     added: Vec<u8>,
+    first_entry: [u8; INDEX_ENTRY_SIZE],
 }
 
 impl IndexData {
     pub fn new(bytes: Box<dyn Deref<Target = [u8]> + Send + Sync>) -> Self {
+        let mut first_entry = [0; INDEX_ENTRY_SIZE];
+        if bytes.len() >= INDEX_ENTRY_SIZE {
+            first_entry[INDEX_HEADER_SIZE..]
+                .copy_from_slice(&bytes[INDEX_HEADER_SIZE..INDEX_ENTRY_SIZE])
+        }
         Self {
             bytes,
             truncation: None,
             added: vec![],
+            first_entry,
         }
     }
 
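For orientation, here is a minimal, self-contained sketch of the caching idea introduced in the hunk above. The constant values mirror the diff, but the helper name and the test data are illustrative, not the hg API. In the RevlogNG format the 4-byte index header overlays the start of entry 0's offset field, so the cached copy zeroes those header bytes and keeps the rest of the first entry.

// Illustrative sketch of the `first_entry` cache; sizes mirror the diff,
// the helper name and example bytes are made up for this sketch.
const INDEX_ENTRY_SIZE: usize = 64;
const INDEX_HEADER_SIZE: usize = 4;

fn cache_first_entry(index_bytes: &[u8]) -> [u8; INDEX_ENTRY_SIZE] {
    let mut first_entry = [0u8; INDEX_ENTRY_SIZE];
    if index_bytes.len() >= INDEX_ENTRY_SIZE {
        // Skip the 4 header bytes that overlay entry 0's offset field;
        // they stay zero in the cached copy, so its offset reads back as 0.
        first_entry[INDEX_HEADER_SIZE..]
            .copy_from_slice(&index_bytes[INDEX_HEADER_SIZE..INDEX_ENTRY_SIZE]);
    }
    first_entry
}

fn main() {
    // Fake index: 4 header bytes followed by the rest of entry 0.
    let mut on_disk = vec![0u8; INDEX_ENTRY_SIZE];
    on_disk[..INDEX_HEADER_SIZE].copy_from_slice(&[0xde, 0xad, 0xbe, 0xef]);
    let cached = cache_first_entry(&on_disk);
    assert_eq!(&cached[..INDEX_HEADER_SIZE], &[0, 0, 0, 0]);
    assert_eq!(&cached[INDEX_HEADER_SIZE..], &on_disk[INDEX_HEADER_SIZE..]);
}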
@@ -356,7 +364,6 @@ impl Index {
             let end = offset + INDEX_ENTRY_SIZE;
             let entry = IndexEntry {
                 bytes: &bytes[offset..end],
-                offset_override: None,
             };
 
             offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
@@ -449,11 +456,17 @@ impl Index {
         if rev == NULL_REVISION {
             return None;
         }
-        Some(if self.is_inline() {
-            self.get_entry_inline(rev)
-        } else {
-            self.get_entry_separated(rev)
-        })
+        if rev.0 == 0 {
+            Some(IndexEntry {
+                bytes: &self.bytes.first_entry[..],
+            })
+        } else {
+            Some(if self.is_inline() {
+                self.get_entry_inline(rev)
+            } else {
+                self.get_entry_separated(rev)
+            })
+        }
     }
 
     /// Return the binary content of the index entry for the given revision
@@ -512,13 +525,7 @@ impl Index {
         let end = start + INDEX_ENTRY_SIZE;
         let bytes = &self.bytes[start..end];
 
-        // See IndexEntry for an explanation of this override.
-        let offset_override = Some(end);
-
-        IndexEntry {
-            bytes,
-            offset_override,
-        }
+        IndexEntry { bytes }
     }
 
     fn get_entry_separated(&self, rev: Revision) -> IndexEntry {
@@ -526,20 +533,12 @@ impl Index {
         let end = start + INDEX_ENTRY_SIZE;
         let bytes = &self.bytes[start..end];
 
-        // Override the offset of the first revision as its bytes are used
-        // for the index's metadata (saving space because it is always 0)
-        let offset_override = if rev == Revision(0) { Some(0) } else { None };
-
-        IndexEntry {
-            bytes,
-            offset_override,
-        }
+        IndexEntry { bytes }
     }
 
     fn null_entry(&self) -> IndexEntry {
         IndexEntry {
             bytes: &[0; INDEX_ENTRY_SIZE],
-            offset_override: Some(0),
         }
     }
 
@@ -755,13 +754,20 @@ impl Index {
         revision_data: RevisionDataParams,
     ) -> Result<(), RevlogError> {
         revision_data.validate()?;
+        let entry_v1 = revision_data.into_v1();
+        let entry_bytes = entry_v1.as_bytes();
+        if self.bytes.len() == 0 {
+            self.bytes.first_entry[INDEX_HEADER_SIZE..].copy_from_slice(
+                &entry_bytes[INDEX_HEADER_SIZE..INDEX_ENTRY_SIZE],
+            )
+        }
         if self.is_inline() {
             let new_offset = self.bytes.len();
             if let Some(offsets) = &mut *self.get_offsets_mut() {
                 offsets.push(new_offset)
             }
         }
-        self.bytes.added.extend(revision_data.into_v1().as_bytes());
+        self.bytes.added.extend(entry_bytes);
         self.clear_head_revs();
         Ok(())
     }
@@ -1654,7 +1660,6 @@ fn inline_scan(bytes: &[u8]) -> (usize, 
         let end = offset + INDEX_ENTRY_SIZE;
         let entry = IndexEntry {
             bytes: &bytes[offset..end],
-            offset_override: None,
         };
 
         offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
@@ -1678,29 +1683,14 @@ impl super::RevlogIndex for Index {
 #[derive(Debug)]
 pub struct IndexEntry<'a> {
     bytes: &'a [u8],
-    /// Allows to override the offset value of the entry.
-    ///
-    /// For interleaved index and data, the offset stored in the index
-    /// corresponds to the separated data offset.
-    /// It has to be overridden with the actual offset in the interleaved
-    /// index which is just after the index block.
-    ///
-    /// For separated index and data, the offset stored in the first index
-    /// entry is mixed with the index headers.
-    /// It has to be overridden with 0.
-    offset_override: Option<usize>,
 }
 
 impl<'a> IndexEntry<'a> {
     /// Return the offset of the data.
     pub fn offset(&self) -> usize {
-        if let Some(offset_override) = self.offset_override {
-            offset_override
-        } else {
-            let mut bytes = [0; 8];
-            bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
-            BigEndian::read_u64(&bytes[..]) as usize
-        }
+        let mut bytes = [0; 8];
+        bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
+        BigEndian::read_u64(&bytes[..]) as usize
     }
     pub fn raw_offset(&self) -> u64 {
         BigEndian::read_u64(&self.bytes[0..8])
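With the override gone, `offset()` always decodes the 48-bit big-endian offset stored in the first 6 bytes of the entry. The following standalone sketch shows that decoding with `u64::from_be_bytes` instead of the `byteorder` crate used in the real code; the function name is illustrative.

// Decode the 6-byte (48-bit) big-endian offset field of a revlog index entry.
// Sketch only; the actual code uses BigEndian::read_u64 from `byteorder`.
fn entry_offset(entry_bytes: &[u8]) -> usize {
    let mut buf = [0u8; 8];
    // Left-pad with two zero bytes so the 6 offset bytes land in the low bits.
    buf[2..8].copy_from_slice(&entry_bytes[0..6]);
    u64::from_be_bytes(buf) as usize
}

fn main() {
    let mut entry = [0u8; 64];
    entry[5] = 1; // lowest byte of the offset field
    assert_eq!(entry_offset(&entry), 1);
}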
@@ -1956,32 +1946,15 @@ mod tests {
     #[test]
     fn test_offset() {
         let bytes = IndexEntryBuilder::new().with_offset(1).build();
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.offset(), 1)
     }
 
     #[test]
-    fn test_with_overridden_offset() {
-        let bytes = IndexEntryBuilder::new().with_offset(1).build();
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: Some(2),
-        };
-
-        assert_eq!(entry.offset(), 2)
-    }
-
-    #[test]
     fn test_compressed_len() {
         let bytes = IndexEntryBuilder::new().with_compressed_len(1).build();
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.compressed_len(), 1)
    }
@@ -1989,10 +1962,7 @@ mod tests {
     #[test]
     fn test_uncompressed_len() {
         let bytes = IndexEntryBuilder::new().with_uncompressed_len(1).build();
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.uncompressed_len(), 1)
     }
@@ -2002,10 +1972,7 @@ mod tests {
         let bytes = IndexEntryBuilder::new()
             .with_base_revision_or_base_of_delta_chain(Revision(1))
             .build();
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.base_revision_or_base_of_delta_chain(), 1.into())
     }
@@ -2016,10 +1983,7 @@ mod tests {
             .with_link_revision(Revision(123))
             .build();
 
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.link_revision(), 123.into());
     }
@@ -2028,10 +1992,7 @@ mod tests {
     fn p1_test() {
         let bytes = IndexEntryBuilder::new().with_p1(Revision(123)).build();
 
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.p1(), 123.into());
     }
@@ -2040,10 +2001,7 @@ mod tests {
     fn p2_test() {
         let bytes = IndexEntryBuilder::new().with_p2(Revision(123)).build();
 
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.p2(), 123.into());
     }
@@ -2054,10 +2012,7 @@ mod tests {
             .unwrap();
         let bytes = IndexEntryBuilder::new().with_node(node).build();
 
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(*entry.hash(), node);
     }
@@ -29,6 +29,7 @@ use zstd;
 use self::node::{NODE_BYTES_LENGTH, NULL_NODE};
 use self::nodemap_docket::NodeMapDocket;
 use super::index::Index;
+use super::index::INDEX_ENTRY_SIZE;
 use super::nodemap::{NodeMap, NodeMapError};
 use crate::errors::HgError;
 use crate::vfs::Vfs;
@@ -531,7 +532,12 @@ impl Revlog {
             .index
             .get_entry(rev)
             .ok_or(RevlogError::InvalidRevision)?;
-        let start = index_entry.offset();
+        let offset = index_entry.offset();
+        let start = if self.index.is_inline() {
+            offset + ((rev.0 as usize + 1) * INDEX_ENTRY_SIZE)
+        } else {
+            offset
+        };
         let end = start + index_entry.compressed_len() as usize;
         let data = if self.index.is_inline() {
             self.index.data(start, end)
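The last two hunks touch the revlog reader rather than the index module: with the override dropped, the caller now translates the logical data offset into a physical position for inline revlogs, where the data chunk of revision r is preceded by r + 1 interleaved index entries. A minimal sketch of that arithmetic follows; the function name and signature are illustrative, not hg's API.

// Sketch of the inline-offset adjustment shown in the hunk above.
const INDEX_ENTRY_SIZE: usize = 64;

fn physical_data_start(logical_offset: usize, rev: usize, is_inline: bool) -> usize {
    if is_inline {
        // In an inline revlog, revision r's data follows r + 1 index entries.
        logical_offset + (rev + 1) * INDEX_ENTRY_SIZE
    } else {
        logical_offset
    }
}

fn main() {
    // Revision 0 of an inline revlog: data starts right after its own entry.
    assert_eq!(physical_data_start(0, 0, true), INDEX_ENTRY_SIZE);
    // Separated revlog: the stored offset is used as-is.
    assert_eq!(physical_data_start(128, 3, false), 128);
}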