unit-tests: Fix `cargo test` on 32-bit platforms...
Simon Sapin
r47649:6d5a26e9 default
@@ -1,404 +1,404 @@
use std::convert::TryInto;
use std::ops::Deref;

use byteorder::{BigEndian, ByteOrder};

use crate::errors::HgError;
use crate::revlog::node::Node;
use crate::revlog::revlog::RevlogError;
use crate::revlog::{Revision, NULL_REVISION};

pub const INDEX_ENTRY_SIZE: usize = 64;
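// Layout of one 64-byte index entry, as implied by the reads further down
// (the 20..24 field is not accessed in this file; it is presumed to hold the
// link revision):
//
//   0..6    48-bit big-endian data offset (shared with the header flags in
//           the first entry, see `is_inline` and `offset_override`)
//   6..8    revision flags
//   8..12   compressed data length (u32)
//  12..16   uncompressed data length (u32)
//  16..20   base revision (i32)
//  20..24   link revision (i32, presumed; unused here)
//  24..28   first parent revision (i32)
//  28..32   second parent revision (i32)
//  32..64   node hash field (only the first 20 bytes, a SHA-1, are used)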

/// A Revlog index
pub struct Index {
    bytes: Box<dyn Deref<Target = [u8]> + Send>,
    /// Offsets of starts of index blocks.
    /// Only needed when the index is interleaved with data.
    offsets: Option<Vec<usize>>,
}

impl Index {
    /// Create an index from bytes.
    /// Calculate the start of each entry when is_inline is true.
    pub fn new(
        bytes: Box<dyn Deref<Target = [u8]> + Send>,
    ) -> Result<Self, RevlogError> {
        if is_inline(&bytes) {
            let mut offset: usize = 0;
            let mut offsets = Vec::new();

            while offset + INDEX_ENTRY_SIZE <= bytes.len() {
                offsets.push(offset);
                let end = offset + INDEX_ENTRY_SIZE;
                let entry = IndexEntry {
                    bytes: &bytes[offset..end],
                    offset_override: None,
                };

                offset += INDEX_ENTRY_SIZE + entry.compressed_len();
            }

            if offset == bytes.len() {
                Ok(Self {
                    bytes,
                    offsets: Some(offsets),
                })
            } else {
                Err(HgError::corrupted("unexpected inline revlog length")
                    .into())
            }
        } else {
            Ok(Self {
                bytes,
                offsets: None,
            })
        }
    }

    /// Value of the inline flag.
    pub fn is_inline(&self) -> bool {
        is_inline(&self.bytes)
    }

    /// Return a slice of bytes if `revlog` is inline. Panic if not.
    pub fn data(&self, start: usize, end: usize) -> &[u8] {
        if !self.is_inline() {
            panic!("tried to access data in the index of a revlog that is not inline");
        }
        &self.bytes[start..end]
    }

    /// Return number of entries of the revlog index.
    pub fn len(&self) -> usize {
        if let Some(offsets) = &self.offsets {
            offsets.len()
        } else {
            self.bytes.len() / INDEX_ENTRY_SIZE
        }
    }

    /// Returns `true` if the `Index` has zero `entries`.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Return the index entry corresponding to the given revision if it
    /// exists.
    pub fn get_entry(&self, rev: Revision) -> Option<IndexEntry> {
        if rev == NULL_REVISION {
            return None;
        }
        if let Some(offsets) = &self.offsets {
            self.get_entry_inline(rev, offsets)
        } else {
            self.get_entry_separated(rev)
        }
    }

    fn get_entry_inline(
        &self,
        rev: Revision,
        offsets: &[usize],
    ) -> Option<IndexEntry> {
        let start = *offsets.get(rev as usize)?;
        let end = start.checked_add(INDEX_ENTRY_SIZE)?;
        let bytes = &self.bytes[start..end];

        // See IndexEntry for an explanation of this override.
        let offset_override = Some(end);

        Some(IndexEntry {
            bytes,
            offset_override,
        })
    }

    fn get_entry_separated(&self, rev: Revision) -> Option<IndexEntry> {
        let max_rev = self.bytes.len() / INDEX_ENTRY_SIZE;
        if rev as usize >= max_rev {
            return None;
        }
        let start = rev as usize * INDEX_ENTRY_SIZE;
        let end = start + INDEX_ENTRY_SIZE;
        let bytes = &self.bytes[start..end];

        // Override the offset of the first revision as its bytes are used
        // for the index's metadata (saving space because it is always 0)
        let offset_override = if rev == 0 { Some(0) } else { None };

        Some(IndexEntry {
            bytes,
            offset_override,
        })
    }
}

impl super::RevlogIndex for Index {
    fn len(&self) -> usize {
        self.len()
    }

    fn node(&self, rev: Revision) -> Option<&Node> {
        self.get_entry(rev).map(|entry| entry.hash())
    }
}

#[derive(Debug)]
pub struct IndexEntry<'a> {
    bytes: &'a [u8],
    /// Allows to override the offset value of the entry.
    ///
    /// For interleaved index and data, the offset stored in the index
    /// corresponds to the separated data offset.
    /// It has to be overridden with the actual offset in the interleaved
    /// index which is just after the index block.
    ///
    /// For separated index and data, the offset stored in the first index
    /// entry is mixed with the index headers.
    /// It has to be overridden with 0.
    offset_override: Option<usize>,
}

impl<'a> IndexEntry<'a> {
    /// Return the offset of the data.
    pub fn offset(&self) -> usize {
        if let Some(offset_override) = self.offset_override {
            offset_override
        } else {
            let mut bytes = [0; 8];
            bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
            BigEndian::read_u64(&bytes[..]) as usize
        }
    }

    /// Return the compressed length of the data.
    pub fn compressed_len(&self) -> usize {
        BigEndian::read_u32(&self.bytes[8..=11]) as usize
    }

    /// Return the uncompressed length of the data.
    pub fn uncompressed_len(&self) -> usize {
        BigEndian::read_u32(&self.bytes[12..=15]) as usize
    }

    /// Return the revision upon which the data has been derived.
    pub fn base_revision(&self) -> Revision {
        // TODO Maybe return an Option when base_revision == rev?
        // Requires to add rev to IndexEntry

        BigEndian::read_i32(&self.bytes[16..])
    }

    pub fn p1(&self) -> Revision {
        BigEndian::read_i32(&self.bytes[24..])
    }

    pub fn p2(&self) -> Revision {
        BigEndian::read_i32(&self.bytes[28..])
    }

    /// Return the hash of revision's full text.
    ///
    /// Currently, SHA-1 is used and only the first 20 bytes of this field
    /// are used.
    pub fn hash(&self) -> &'a Node {
        (&self.bytes[32..52]).try_into().unwrap()
    }
}

/// Value of the inline flag.
pub fn is_inline(index_bytes: &[u8]) -> bool {
    match &index_bytes[0..=1] {
        [0, 0] | [0, 2] => false,
        _ => true,
    }
}
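
// An illustrative sketch (hypothetical, not in this file) of the same check
// written against a named flag: the first four bytes of the first entry form
// a big-endian header whose low 16 bits are the revlog version and whose
// high 16 bits are feature flags, `1 << 16` for inline data and `1 << 17`
// for general delta. `[0, 0]` and `[0, 2]` above are thus the two headers
// with the inline bit cleared; the `_ => true` arm is more permissive than
// the explicit bit test sketched here.
const FLAG_INLINE_DATA: u32 = 1 << 16;

fn is_inline_from_header(index_bytes: &[u8]) -> bool {
    let header = u32::from_be_bytes(index_bytes[0..4].try_into().unwrap());
    header & FLAG_INLINE_DATA != 0
}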

#[cfg(test)]
mod tests {
    use super::*;

    #[cfg(test)]
    #[derive(Debug, Copy, Clone)]
    pub struct IndexEntryBuilder {
        is_first: bool,
        is_inline: bool,
        is_general_delta: bool,
        version: u16,
        offset: usize,
        compressed_len: usize,
        uncompressed_len: usize,
        base_revision: Revision,
    }

    #[cfg(test)]
    impl IndexEntryBuilder {
        pub fn new() -> Self {
            Self {
                is_first: false,
                is_inline: false,
                is_general_delta: true,
                version: 2,
                offset: 0,
                compressed_len: 0,
                uncompressed_len: 0,
                base_revision: 0,
            }
        }

        pub fn is_first(&mut self, value: bool) -> &mut Self {
            self.is_first = value;
            self
        }

        pub fn with_inline(&mut self, value: bool) -> &mut Self {
            self.is_inline = value;
            self
        }

        pub fn with_general_delta(&mut self, value: bool) -> &mut Self {
            self.is_general_delta = value;
            self
        }

        pub fn with_version(&mut self, value: u16) -> &mut Self {
            self.version = value;
            self
        }

        pub fn with_offset(&mut self, value: usize) -> &mut Self {
            self.offset = value;
            self
        }

        pub fn with_compressed_len(&mut self, value: usize) -> &mut Self {
            self.compressed_len = value;
            self
        }

        pub fn with_uncompressed_len(&mut self, value: usize) -> &mut Self {
            self.uncompressed_len = value;
            self
        }

        pub fn with_base_revision(&mut self, value: Revision) -> &mut Self {
            self.base_revision = value;
            self
        }

        pub fn build(&self) -> Vec<u8> {
            let mut bytes = Vec::with_capacity(INDEX_ENTRY_SIZE);
            if self.is_first {
                bytes.extend(&match (self.is_general_delta, self.is_inline) {
                    (false, false) => [0u8, 0],
                    (false, true) => [0u8, 1],
                    (true, false) => [0u8, 2],
                    (true, true) => [0u8, 3],
                });
                bytes.extend(&self.version.to_be_bytes());
                // Remaining offset bytes.
                bytes.extend(&[0u8; 2]);
            } else {
-                // Offset is only 6 bytes will usize is 8.
-                bytes.extend(&self.offset.to_be_bytes()[2..]);
+                // Offset stored on 48 bits (6 bytes)
+                bytes.extend(&(self.offset as u64).to_be_bytes()[2..]);
            }
            bytes.extend(&[0u8; 2]); // Revision flags.
-            bytes.extend(&self.compressed_len.to_be_bytes()[4..]);
-            bytes.extend(&self.uncompressed_len.to_be_bytes()[4..]);
+            bytes.extend(&(self.compressed_len as u32).to_be_bytes());
+            bytes.extend(&(self.uncompressed_len as u32).to_be_bytes());
            bytes.extend(&self.base_revision.to_be_bytes());
            bytes
        }
    }
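
    // A hypothetical usage sketch (added for illustration, mirroring the
    // tests below): build a partial entry with the builder and read the
    // serialized fields back through `IndexEntry`. Only fields within the
    // first 20 bytes produced by `build()` are accessed.
    #[test]
    fn builder_roundtrip_sketch() {
        let bytes = IndexEntryBuilder::new()
            .with_offset(42)
            .with_compressed_len(7)
            .with_uncompressed_len(12)
            .with_base_revision(3)
            .build();
        let entry = IndexEntry {
            bytes: &bytes,
            offset_override: None,
        };

        assert_eq!(entry.offset(), 42);
        assert_eq!(entry.compressed_len(), 7);
        assert_eq!(entry.uncompressed_len(), 12);
        assert_eq!(entry.base_revision(), 3);
    }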

    #[test]
    fn is_not_inline_when_no_inline_flag_test() {
        let bytes = IndexEntryBuilder::new()
            .is_first(true)
            .with_general_delta(false)
            .with_inline(false)
            .build();

        assert_eq!(is_inline(&bytes), false)
    }

    #[test]
    fn is_inline_when_inline_flag_test() {
        let bytes = IndexEntryBuilder::new()
            .is_first(true)
            .with_general_delta(false)
            .with_inline(true)
            .build();

        assert_eq!(is_inline(&bytes), true)
    }

    #[test]
    fn is_inline_when_inline_and_generaldelta_flags_test() {
        let bytes = IndexEntryBuilder::new()
            .is_first(true)
            .with_general_delta(true)
            .with_inline(true)
            .build();

        assert_eq!(is_inline(&bytes), true)
    }

    #[test]
    fn test_offset() {
        let bytes = IndexEntryBuilder::new().with_offset(1).build();
        let entry = IndexEntry {
            bytes: &bytes,
            offset_override: None,
        };

        assert_eq!(entry.offset(), 1)
    }

    #[test]
    fn test_with_overridden_offset() {
        let bytes = IndexEntryBuilder::new().with_offset(1).build();
        let entry = IndexEntry {
            bytes: &bytes,
            offset_override: Some(2),
        };

        assert_eq!(entry.offset(), 2)
    }

    #[test]
    fn test_compressed_len() {
        let bytes = IndexEntryBuilder::new().with_compressed_len(1).build();
        let entry = IndexEntry {
            bytes: &bytes,
            offset_override: None,
        };

        assert_eq!(entry.compressed_len(), 1)
    }

    #[test]
    fn test_uncompressed_len() {
        let bytes = IndexEntryBuilder::new().with_uncompressed_len(1).build();
        let entry = IndexEntry {
            bytes: &bytes,
            offset_override: None,
        };

        assert_eq!(entry.uncompressed_len(), 1)
    }

    #[test]
    fn test_base_revision() {
        let bytes = IndexEntryBuilder::new().with_base_revision(1).build();
        let entry = IndexEntry {
            bytes: &bytes,
            offset_override: None,
        };

        assert_eq!(entry.base_revision(), 1)
    }
}

#[cfg(test)]
pub use tests::IndexEntryBuilder;
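
The change above is what makes `cargo test` pass on 32-bit targets: `usize::to_be_bytes()` returns `size_of::<usize>()` bytes, so slicing its result with `[2..]` or `[4..]` only yields the intended 6- and 4-byte fields when `usize` is 64 bits wide. On a 32-bit platform the old builder emitted entries shorter than expected, so the accessor tests either read the wrong bytes or panicked indexing past the end. Casting to `u64`/`u32` before serializing makes the entry width platform-independent. A minimal standalone sketch of that width difference (not from the repository):

    // Demonstrates why the cast matters: the old slice length depends on the
    // platform's pointer width, the new one does not.
    fn main() {
        let offset: usize = 1;

        // Old approach: 6 bytes on 64-bit targets, only 2 bytes when
        // size_of::<usize>() == 4, so the built entry came out too short.
        let old_encoding = offset.to_be_bytes();
        println!("platform-dependent length: {}", old_encoding[2..].len());

        // New approach: cast to a fixed-width integer first; always 6 offset
        // bytes (and exactly 4 bytes from `u32` for the length fields).
        let new_encoding = (offset as u64).to_be_bytes();
        assert_eq!(new_encoding[2..].len(), 6);
    }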