rust-revlog: add methods for getting parent revs and entries...
Martin von Zweigbergk
r49939:5d205e47 default
@@ -1,254 +1,269 @@
 use crate::errors::HgError;
 use crate::repo::Repo;
-use crate::revlog::revlog::{Revlog, RevlogError};
+use crate::revlog::revlog::{Revlog, RevlogEntry, RevlogError};
 use crate::revlog::Revision;
 use crate::revlog::{Node, NodePrefix};
 use crate::utils::hg_path::HgPath;
 use itertools::Itertools;
 use std::ascii::escape_default;
 use std::fmt::{Debug, Formatter};

 /// A specialized `Revlog` to work with `changelog` data format.
 pub struct Changelog {
     /// The generic `revlog` format.
     pub(crate) revlog: Revlog,
 }

 impl Changelog {
     /// Open the `changelog` of a repository given by its root.
     pub fn open(repo: &Repo) -> Result<Self, HgError> {
         let revlog = Revlog::open(repo, "00changelog.i", None)?;
         Ok(Self { revlog })
     }

     /// Return the `ChangelogEntry` for the given node ID.
     pub fn data_for_node(
         &self,
         node: NodePrefix,
     ) -> Result<ChangelogRevisionData, RevlogError> {
         let rev = self.revlog.rev_from_node(node)?;
         self.data_for_rev(rev)
     }

+    /// Return the `RevlogEntry` of the given revision number.
+    pub fn entry_for_rev(
+        &self,
+        rev: Revision,
+    ) -> Result<RevlogEntry, RevlogError> {
+        self.revlog.get_entry(rev)
+    }
+
     /// Return the `ChangelogEntry` of the given revision number.
     pub fn data_for_rev(
         &self,
         rev: Revision,
     ) -> Result<ChangelogRevisionData, RevlogError> {
         let bytes = self.revlog.get_rev_data(rev)?.into_owned();
         if bytes.is_empty() {
             Ok(ChangelogRevisionData::null())
         } else {
             Ok(ChangelogRevisionData::new(bytes).map_err(|err| {
                 RevlogError::Other(HgError::CorruptedRepository(format!(
                     "Invalid changelog data for revision {}: {:?}",
                     rev, err
                 )))
             })?)
         }
     }

     pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
         self.revlog.node_from_rev(rev)
     }
+
+    pub fn rev_from_node(
+        &self,
+        node: NodePrefix,
+    ) -> Result<Revision, RevlogError> {
+        self.revlog.rev_from_node(node)
+    }
 }

 /// `Changelog` entry which knows how to interpret the `changelog` data bytes.
 #[derive(PartialEq)]
 pub struct ChangelogRevisionData {
     /// The data bytes of the `changelog` entry.
     bytes: Vec<u8>,
     /// The end offset for the hex manifest (not including the newline)
     manifest_end: usize,
     /// The end offset for the user+email (not including the newline)
     user_end: usize,
     /// The end offset for the timestamp+timezone+extras (not including the
     /// newline)
     timestamp_end: usize,
     /// The end offset for the file list (not including the newline)
     files_end: usize,
 }

 impl ChangelogRevisionData {
     fn new(bytes: Vec<u8>) -> Result<Self, HgError> {
         let mut line_iter = bytes.split(|b| b == &b'\n');
         let manifest_end = line_iter
             .next()
             .expect("Empty iterator from split()?")
             .len();
         let user_slice = line_iter.next().ok_or_else(|| {
             HgError::corrupted("Changeset data truncated after manifest line")
         })?;
         let user_end = manifest_end + 1 + user_slice.len();
         let timestamp_slice = line_iter.next().ok_or_else(|| {
             HgError::corrupted("Changeset data truncated after user line")
         })?;
         let timestamp_end = user_end + 1 + timestamp_slice.len();
         let mut files_end = timestamp_end + 1;
         loop {
             let line = line_iter.next().ok_or_else(|| {
                 HgError::corrupted("Changeset data truncated in files list")
             })?;
             if line.is_empty() {
                 if files_end == bytes.len() {
                     // The list of files ended with a single newline (there
                     // should be two)
                     return Err(HgError::corrupted(
                         "Changeset data truncated after files list",
                     ));
                 }
                 files_end -= 1;
                 break;
             }
             files_end += line.len() + 1;
         }

         Ok(Self {
             bytes,
             manifest_end,
             user_end,
             timestamp_end,
             files_end,
         })
     }

     fn null() -> Self {
         Self::new(
             b"0000000000000000000000000000000000000000\n\n0 0\n\n".to_vec(),
         )
         .unwrap()
     }

     /// Return an iterator over the lines of the entry.
     pub fn lines(&self) -> impl Iterator<Item = &[u8]> {
         self.bytes.split(|b| b == &b'\n')
     }

     /// Return the node id of the `manifest` referenced by this `changelog`
     /// entry.
     pub fn manifest_node(&self) -> Result<Node, HgError> {
         let manifest_node_hex = &self.bytes[..self.manifest_end];
         Node::from_hex_for_repo(manifest_node_hex)
     }

     /// The full user string (usually a name followed by an email enclosed in
     /// angle brackets)
     pub fn user(&self) -> &[u8] {
         &self.bytes[self.manifest_end + 1..self.user_end]
     }

     /// The full timestamp line (timestamp in seconds, offset in seconds, and
     /// possibly extras)
     // TODO: We should expose this in a more useful way
     pub fn timestamp_line(&self) -> &[u8] {
         &self.bytes[self.user_end + 1..self.timestamp_end]
     }

     /// The files changed in this revision.
     pub fn files(&self) -> impl Iterator<Item = &HgPath> {
         self.bytes[self.timestamp_end + 1..self.files_end]
             .split(|b| b == &b'\n')
             .map(|path| HgPath::new(path))
     }

     /// The change description.
     pub fn description(&self) -> &[u8] {
         &self.bytes[self.files_end + 2..]
     }
 }

 impl Debug for ChangelogRevisionData {
     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
         f.debug_struct("ChangelogRevisionData")
             .field("bytes", &debug_bytes(&self.bytes))
             .field("manifest", &debug_bytes(&self.bytes[..self.manifest_end]))
             .field(
                 "user",
                 &debug_bytes(
                     &self.bytes[self.manifest_end + 1..self.user_end],
                 ),
             )
             .field(
                 "timestamp",
                 &debug_bytes(
                     &self.bytes[self.user_end + 1..self.timestamp_end],
                 ),
             )
             .field(
                 "files",
                 &debug_bytes(
                     &self.bytes[self.timestamp_end + 1..self.files_end],
                 ),
             )
             .field(
                 "description",
                 &debug_bytes(&self.bytes[self.files_end + 2..]),
             )
             .finish()
     }
 }

 fn debug_bytes(bytes: &[u8]) -> String {
     String::from_utf8_lossy(
         &bytes.iter().flat_map(|b| escape_default(*b)).collect_vec(),
     )
     .to_string()
 }

 #[cfg(test)]
 mod tests {
     use super::*;
     use itertools::Itertools;
     use pretty_assertions::assert_eq;

     #[test]
     fn test_create_changelogrevisiondata_invalid() {
         // Completely empty
         assert!(ChangelogRevisionData::new(b"abcd".to_vec()).is_err());
         // No newline after manifest
         assert!(ChangelogRevisionData::new(b"abcd".to_vec()).is_err());
         // No newline after user
         assert!(ChangelogRevisionData::new(b"abcd\n".to_vec()).is_err());
         // No newline after timestamp
         assert!(ChangelogRevisionData::new(b"abcd\n\n0 0".to_vec()).is_err());
         // Missing newline after files
         assert!(ChangelogRevisionData::new(
             b"abcd\n\n0 0\nfile1\nfile2".to_vec()
         )
         .is_err(),);
         // Only one newline after files
         assert!(ChangelogRevisionData::new(
             b"abcd\n\n0 0\nfile1\nfile2\n".to_vec()
         )
         .is_err(),);
     }

     #[test]
     fn test_create_changelogrevisiondata() {
         let data = ChangelogRevisionData::new(
             b"0123456789abcdef0123456789abcdef01234567
 Some One <someone@example.com>
 0 0
 file1
 file2

 some
 commit
 message"
                 .to_vec(),
         )
         .unwrap();
         assert_eq!(
             data.manifest_node().unwrap(),
             Node::from_hex("0123456789abcdef0123456789abcdef01234567")
                 .unwrap()
         );
         assert_eq!(data.user(), b"Some One <someone@example.com>");
         assert_eq!(data.timestamp_line(), b"0 0");
         assert_eq!(
             data.files().collect_vec(),
             vec![HgPath::new("file1"), HgPath::new("file2")]
         );
         assert_eq!(data.description(), b"some\ncommit\nmessage");
     }
 }
@@ -1,468 +1,504 @@
 use std::borrow::Cow;
 use std::convert::TryFrom;
 use std::io::Read;
 use std::ops::Deref;
 use std::path::Path;

 use flate2::read::ZlibDecoder;
 use micro_timer::timed;
 use sha1::{Digest, Sha1};
 use zstd;

 use super::index::Index;
 use super::node::{NodePrefix, NODE_BYTES_LENGTH, NULL_NODE};
 use super::nodemap;
 use super::nodemap::{NodeMap, NodeMapError};
 use super::nodemap_docket::NodeMapDocket;
 use super::patch;
 use crate::errors::HgError;
 use crate::repo::Repo;
 use crate::revlog::Revision;
 use crate::{Node, NULL_REVISION};

 const REVISION_FLAG_CENSORED: u16 = 1 << 15;
 const REVISION_FLAG_ELLIPSIS: u16 = 1 << 14;
 const REVISION_FLAG_EXTSTORED: u16 = 1 << 13;
 const REVISION_FLAG_HASCOPIESINFO: u16 = 1 << 12;

 // Keep this in sync with REVIDX_KNOWN_FLAGS in
 // mercurial/revlogutils/flagutil.py
 const REVIDX_KNOWN_FLAGS: u16 = REVISION_FLAG_CENSORED
     | REVISION_FLAG_ELLIPSIS
     | REVISION_FLAG_EXTSTORED
     | REVISION_FLAG_HASCOPIESINFO;

 #[derive(derive_more::From)]
 pub enum RevlogError {
     InvalidRevision,
     /// Working directory is not supported
     WDirUnsupported,
     /// Found more than one entry whose ID match the requested prefix
     AmbiguousPrefix,
     #[from]
     Other(HgError),
 }

 impl From<NodeMapError> for RevlogError {
     fn from(error: NodeMapError) -> Self {
         match error {
             NodeMapError::MultipleResults => RevlogError::AmbiguousPrefix,
             NodeMapError::RevisionNotInIndex(_) => RevlogError::corrupted(),
         }
     }
 }

 fn corrupted() -> HgError {
     HgError::corrupted("corrupted revlog")
 }

 impl RevlogError {
     fn corrupted() -> Self {
         RevlogError::Other(corrupted())
     }
 }

 /// Read only implementation of revlog.
 pub struct Revlog {
     /// When index and data are not interleaved: bytes of the revlog index.
     /// When index and data are interleaved: bytes of the revlog index and
     /// data.
     index: Index,
     /// When index and data are not interleaved: bytes of the revlog data
     data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
     /// When present on disk: the persistent nodemap for this revlog
     nodemap: Option<nodemap::NodeTree>,
 }

 impl Revlog {
     /// Open a revlog index file.
     ///
     /// It will also open the associated data file if index and data are not
     /// interleaved.
     #[timed]
     pub fn open(
         repo: &Repo,
         index_path: impl AsRef<Path>,
         data_path: Option<&Path>,
     ) -> Result<Self, HgError> {
         let index_path = index_path.as_ref();
         let index = {
             match repo.store_vfs().mmap_open_opt(&index_path)? {
                 None => Index::new(Box::new(vec![])),
                 Some(index_mmap) => {
                     let index = Index::new(Box::new(index_mmap))?;
                     Ok(index)
                 }
             }
         }?;

         let default_data_path = index_path.with_extension("d");

         // type annotation required
         // won't recognize Mmap as Deref<Target = [u8]>
         let data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>> =
             if index.is_inline() {
                 None
             } else {
                 let data_path = data_path.unwrap_or(&default_data_path);
                 let data_mmap = repo.store_vfs().mmap_open(data_path)?;
                 Some(Box::new(data_mmap))
             };

         let nodemap = if index.is_inline() {
             None
         } else {
             NodeMapDocket::read_from_file(repo, index_path)?.map(
                 |(docket, data)| {
                     nodemap::NodeTree::load_bytes(
                         Box::new(data),
                         docket.data_length,
                     )
                 },
             )
         };

         Ok(Revlog {
             index,
             data_bytes,
             nodemap,
         })
     }

     /// Return number of entries of the `Revlog`.
     pub fn len(&self) -> usize {
         self.index.len()
     }

     /// Returns `true` if the `Revlog` has zero `entries`.
     pub fn is_empty(&self) -> bool {
         self.index.is_empty()
     }

     /// Returns the node ID for the given revision number, if it exists in this
     /// revlog
     pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
         if rev == NULL_REVISION {
             return Some(&NULL_NODE);
         }
         Some(self.index.get_entry(rev)?.hash())
     }

     /// Return the revision number for the given node ID, if it exists in this
     /// revlog
     #[timed]
     pub fn rev_from_node(
         &self,
         node: NodePrefix,
     ) -> Result<Revision, RevlogError> {
         if node.is_prefix_of(&NULL_NODE) {
             return Ok(NULL_REVISION);
         }

         if let Some(nodemap) = &self.nodemap {
             return nodemap
                 .find_bin(&self.index, node)?
                 .ok_or(RevlogError::InvalidRevision);
         }

         // Fallback to linear scan when a persistent nodemap is not present.
         // This happens when the persistent-nodemap experimental feature is not
         // enabled, or for small revlogs.
         //
         // TODO: consider building a non-persistent nodemap in memory to
         // optimize these cases.
         let mut found_by_prefix = None;
         for rev in (0..self.len() as Revision).rev() {
             let index_entry =
                 self.index.get_entry(rev).ok_or(HgError::corrupted(
                     "revlog references a revision not in the index",
                 ))?;
             if node == *index_entry.hash() {
                 return Ok(rev);
             }
             if node.is_prefix_of(index_entry.hash()) {
                 if found_by_prefix.is_some() {
                     return Err(RevlogError::AmbiguousPrefix);
                 }
                 found_by_prefix = Some(rev)
             }
         }
         found_by_prefix.ok_or(RevlogError::InvalidRevision)
     }

     /// Returns whether the given revision exists in this revlog.
     pub fn has_rev(&self, rev: Revision) -> bool {
         self.index.get_entry(rev).is_some()
     }

     /// Return the full data associated to a revision.
     ///
     /// All entries required to build the final data out of deltas will be
     /// retrieved as needed, and the deltas will be applied to the inital
     /// snapshot to rebuild the final data.
     #[timed]
     pub fn get_rev_data(
         &self,
         rev: Revision,
     ) -> Result<Cow<[u8]>, RevlogError> {
         if rev == NULL_REVISION {
             return Ok(Cow::Borrowed(&[]));
         };
         Ok(self.get_entry(rev)?.data()?)
     }

     /// Check the hash of some given data against the recorded hash.
     pub fn check_hash(
         &self,
         p1: Revision,
         p2: Revision,
         expected: &[u8],
         data: &[u8],
     ) -> bool {
         let e1 = self.index.get_entry(p1);
         let h1 = match e1 {
             Some(ref entry) => entry.hash(),
             None => &NULL_NODE,
         };
         let e2 = self.index.get_entry(p2);
         let h2 = match e2 {
             Some(ref entry) => entry.hash(),
             None => &NULL_NODE,
         };

         &hash(data, h1.as_bytes(), h2.as_bytes()) == expected
     }

     /// Build the full data of a revision out its snapshot
     /// and its deltas.
     #[timed]
     fn build_data_from_deltas(
         snapshot: RevlogEntry,
         deltas: &[RevlogEntry],
     ) -> Result<Vec<u8>, HgError> {
         let snapshot = snapshot.data_chunk()?;
         let deltas = deltas
             .iter()
             .rev()
             .map(RevlogEntry::data_chunk)
             .collect::<Result<Vec<_>, _>>()?;
         let patches: Vec<_> =
             deltas.iter().map(|d| patch::PatchList::new(d)).collect();
         let patch = patch::fold_patch_lists(&patches);
         Ok(patch.apply(&snapshot))
     }

     /// Return the revlog data.
     fn data(&self) -> &[u8] {
         match self.data_bytes {
             Some(ref data_bytes) => &data_bytes,
             None => panic!(
                 "forgot to load the data or trying to access inline data"
             ),
         }
     }

     /// Get an entry of the revlog.
     pub fn get_entry(
         &self,
         rev: Revision,
     ) -> Result<RevlogEntry, RevlogError> {
         let index_entry = self
             .index
             .get_entry(rev)
             .ok_or(RevlogError::InvalidRevision)?;
         let start = index_entry.offset();
         let end = start + index_entry.compressed_len() as usize;
         let data = if self.index.is_inline() {
             self.index.data(start, end)
         } else {
             &self.data()[start..end]
         };
         let entry = RevlogEntry {
             revlog: self,
             rev,
             bytes: data,
             compressed_len: index_entry.compressed_len(),
             uncompressed_len: index_entry.uncompressed_len(),
             base_rev_or_base_of_delta_chain: if index_entry
                 .base_revision_or_base_of_delta_chain()
                 == rev
             {
                 None
             } else {
                 Some(index_entry.base_revision_or_base_of_delta_chain())
             },
             p1: index_entry.p1(),
             p2: index_entry.p2(),
             flags: index_entry.flags(),
             hash: *index_entry.hash(),
         };
         Ok(entry)
     }

     /// when resolving internal references within revlog, any errors
     /// should be reported as corruption, instead of e.g. "invalid revision"
     fn get_entry_internal(
         &self,
         rev: Revision,
     ) -> Result<RevlogEntry, HgError> {
         return self.get_entry(rev).map_err(|_| corrupted());
     }
 }

 /// The revlog entry's bytes and the necessary informations to extract
 /// the entry's data.
 #[derive(Clone)]
 pub struct RevlogEntry<'a> {
     revlog: &'a Revlog,
     rev: Revision,
     bytes: &'a [u8],
     compressed_len: u32,
     uncompressed_len: i32,
     base_rev_or_base_of_delta_chain: Option<Revision>,
     p1: Revision,
     p2: Revision,
     flags: u16,
     hash: Node,
 }

 impl<'a> RevlogEntry<'a> {
     pub fn revision(&self) -> Revision {
         self.rev
     }

+    pub fn node(&self) -> &Node {
+        &self.hash
+    }
+
     pub fn uncompressed_len(&self) -> Option<u32> {
         u32::try_from(self.uncompressed_len).ok()
     }

     pub fn has_p1(&self) -> bool {
         self.p1 != NULL_REVISION
     }

+    pub fn p1_entry(&self) -> Result<Option<RevlogEntry>, RevlogError> {
+        if self.p1 == NULL_REVISION {
+            Ok(None)
+        } else {
+            Ok(Some(self.revlog.get_entry(self.p1)?))
+        }
+    }
+
+    pub fn p2_entry(&self) -> Result<Option<RevlogEntry>, RevlogError> {
+        if self.p2 == NULL_REVISION {
+            Ok(None)
+        } else {
+            Ok(Some(self.revlog.get_entry(self.p2)?))
+        }
+    }
+
+    pub fn p1(&self) -> Option<Revision> {
+        if self.p1 == NULL_REVISION {
+            None
+        } else {
+            Some(self.p1)
+        }
+    }
+
+    pub fn p2(&self) -> Option<Revision> {
+        if self.p2 == NULL_REVISION {
+            None
+        } else {
+            Some(self.p2)
+        }
+    }
+
     pub fn is_cencored(&self) -> bool {
         (self.flags & REVISION_FLAG_CENSORED) != 0
     }

     pub fn has_length_affecting_flag_processor(&self) -> bool {
         // Relevant Python code: revlog.size()
         // note: ELLIPSIS is known to not change the content
         (self.flags & (REVIDX_KNOWN_FLAGS ^ REVISION_FLAG_ELLIPSIS)) != 0
     }

     /// The data for this entry, after resolving deltas if any.
     pub fn data(&self) -> Result<Cow<'a, [u8]>, HgError> {
         let mut entry = self.clone();
         let mut delta_chain = vec![];

         // The meaning of `base_rev_or_base_of_delta_chain` depends on
         // generaldelta. See the doc on `ENTRY_DELTA_BASE` in
         // `mercurial/revlogutils/constants.py` and the code in
         // [_chaininfo] and in [index_deltachain].
         let uses_generaldelta = self.revlog.index.uses_generaldelta();
         while let Some(base_rev) = entry.base_rev_or_base_of_delta_chain {
             let base_rev = if uses_generaldelta {
                 base_rev
             } else {
                 entry.rev - 1
             };
             delta_chain.push(entry);
             entry = self.revlog.get_entry_internal(base_rev)?;
         }

         let data = if delta_chain.is_empty() {
             entry.data_chunk()?
         } else {
             Revlog::build_data_from_deltas(entry, &delta_chain)?.into()
         };

         if self.revlog.check_hash(
             self.p1,
             self.p2,
             self.hash.as_bytes(),
             &data,
         ) {
             Ok(data)
         } else {
             Err(corrupted())
         }
     }

     /// Extract the data contained in the entry.
     /// This may be a delta. (See `is_delta`.)
     fn data_chunk(&self) -> Result<Cow<'a, [u8]>, HgError> {
         if self.bytes.is_empty() {
             return Ok(Cow::Borrowed(&[]));
         }
         match self.bytes[0] {
             // Revision data is the entirety of the entry, including this
             // header.
             b'\0' => Ok(Cow::Borrowed(self.bytes)),
             // Raw revision data follows.
             b'u' => Ok(Cow::Borrowed(&self.bytes[1..])),
             // zlib (RFC 1950) data.
             b'x' => Ok(Cow::Owned(self.uncompressed_zlib_data()?)),
             // zstd data.
             b'\x28' => Ok(Cow::Owned(self.uncompressed_zstd_data()?)),
             // A proper new format should have had a repo/store requirement.
             _format_type => Err(corrupted()),
         }
     }

     fn uncompressed_zlib_data(&self) -> Result<Vec<u8>, HgError> {
         let mut decoder = ZlibDecoder::new(self.bytes);
         if self.is_delta() {
             let mut buf = Vec::with_capacity(self.compressed_len as usize);
             decoder.read_to_end(&mut buf).map_err(|_| corrupted())?;
             Ok(buf)
         } else {
             let cap = self.uncompressed_len.max(0) as usize;
             let mut buf = vec![0; cap];
             decoder.read_exact(&mut buf).map_err(|_| corrupted())?;
             Ok(buf)
         }
     }

     fn uncompressed_zstd_data(&self) -> Result<Vec<u8>, HgError> {
         if self.is_delta() {
             let mut buf = Vec::with_capacity(self.compressed_len as usize);
             zstd::stream::copy_decode(self.bytes, &mut buf)
                 .map_err(|_| corrupted())?;
             Ok(buf)
         } else {
             let cap = self.uncompressed_len.max(0) as usize;
             let mut buf = vec![0; cap];
             let len = zstd::block::decompress_to_buffer(self.bytes, &mut buf)
                 .map_err(|_| corrupted())?;
             if len != self.uncompressed_len as usize {
                 Err(corrupted())
             } else {
                 Ok(buf)
             }
         }
     }

     /// Tell if the entry is a snapshot or a delta
     /// (influences on decompression).
     fn is_delta(&self) -> bool {
         self.base_rev_or_base_of_delta_chain.is_some()
     }
 }

 /// Calculate the hash of a revision given its data and its parents.
 fn hash(
     data: &[u8],
     p1_hash: &[u8],
     p2_hash: &[u8],
 ) -> [u8; NODE_BYTES_LENGTH] {
     let mut hasher = Sha1::new();
     let (a, b) = (p1_hash, p2_hash);
     if a > b {
         hasher.update(b);
         hasher.update(a);
     } else {
         hasher.update(a);
         hasher.update(b);
     }
     hasher.update(data);
     *hasher.finalize().as_ref()
 }
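Taken together, the new `RevlogEntry::node`, `p1`/`p2`, and `p1_entry`/`p2_entry` accessors let callers walk the changelog graph without touching the index directly. A hedged sketch of such a walk, assuming `changelog` is an open `Changelog`; the helper name is hypothetical and error handling is minimal:

fn first_parent_revs(
    changelog: &Changelog,
    start: NodePrefix,
) -> Result<Vec<Revision>, RevlogError> {
    let mut rev = changelog.rev_from_node(start)?;
    let mut revs = vec![rev];
    // `p1()` returns `None` once the chain reaches the null revision.
    while let Some(p1) = changelog.entry_for_rev(rev)?.p1() {
        revs.push(p1);
        rev = p1;
    }
    Ok(revs)
}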