unit-tests: Fix `cargo test` on 32-bit platforms...
Simon Sapin
r47649:6d5a26e9 default
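The change is limited to the test-only `IndexEntryBuilder::build()`: `usize::to_be_bytes()` yields 8 bytes on 64-bit targets but only 4 on 32-bit ones, so slicing off a fixed prefix produced truncated offset and length fields, and `cargo test` failed on 32-bit platforms. Casting to `u64`/`u32` before serializing makes the byte widths platform-independent. A minimal standalone sketch of the issue and the fix (illustrative only, not part of the diff below):

```rust
fn main() {
    let offset: usize = 1;

    // usize::to_be_bytes() is platform-dependent: 8 bytes on 64-bit targets,
    // 4 bytes on 32-bit ones, so `[2..]` no longer yields the 6-byte (48-bit)
    // big-endian offset an index entry expects.
    println!("usize is {} bytes here", offset.to_be_bytes().len());

    // Casting to a fixed-width integer first gives the same layout everywhere.
    assert_eq!((offset as u64).to_be_bytes()[2..].len(), 6);

    // Lengths are written as exactly 4 big-endian bytes in the same way.
    let compressed_len: usize = 1;
    assert_eq!((compressed_len as u32).to_be_bytes().len(), 4);
}
```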
@@ -1,404 +1,404 b''
1 1 use std::convert::TryInto;
2 2 use std::ops::Deref;
3 3
4 4 use byteorder::{BigEndian, ByteOrder};
5 5
6 6 use crate::errors::HgError;
7 7 use crate::revlog::node::Node;
8 8 use crate::revlog::revlog::RevlogError;
9 9 use crate::revlog::{Revision, NULL_REVISION};
10 10
11 11 pub const INDEX_ENTRY_SIZE: usize = 64;
12 12
13 13 /// A Revlog index
14 14 pub struct Index {
15 15 bytes: Box<dyn Deref<Target = [u8]> + Send>,
16 16 /// Offsets of starts of index blocks.
17 17 /// Only needed when the index is interleaved with data.
18 18 offsets: Option<Vec<usize>>,
19 19 }
20 20
21 21 impl Index {
22 22 /// Create an index from bytes.
23 23 /// Calculate the start of each entry when is_inline is true.
24 24 pub fn new(
25 25 bytes: Box<dyn Deref<Target = [u8]> + Send>,
26 26 ) -> Result<Self, RevlogError> {
27 27 if is_inline(&bytes) {
28 28 let mut offset: usize = 0;
29 29 let mut offsets = Vec::new();
30 30
31 31 while offset + INDEX_ENTRY_SIZE <= bytes.len() {
32 32 offsets.push(offset);
33 33 let end = offset + INDEX_ENTRY_SIZE;
34 34 let entry = IndexEntry {
35 35 bytes: &bytes[offset..end],
36 36 offset_override: None,
37 37 };
38 38
39 39 offset += INDEX_ENTRY_SIZE + entry.compressed_len();
40 40 }
41 41
42 42 if offset == bytes.len() {
43 43 Ok(Self {
44 44 bytes,
45 45 offsets: Some(offsets),
46 46 })
47 47 } else {
48 48 Err(HgError::corrupted("unexpected inline revlog length")
49 49 .into())
50 50 }
51 51 } else {
52 52 Ok(Self {
53 53 bytes,
54 54 offsets: None,
55 55 })
56 56 }
57 57 }
58 58
59 59 /// Value of the inline flag.
60 60 pub fn is_inline(&self) -> bool {
61 61 is_inline(&self.bytes)
62 62 }
63 63
64 64 /// Return a slice of bytes if `revlog` is inline. Panic if not.
65 65 pub fn data(&self, start: usize, end: usize) -> &[u8] {
66 66 if !self.is_inline() {
67 67 panic!("tried to access data in the index of a revlog that is not inline");
68 68 }
69 69 &self.bytes[start..end]
70 70 }
71 71
72 72 /// Return number of entries of the revlog index.
73 73 pub fn len(&self) -> usize {
74 74 if let Some(offsets) = &self.offsets {
75 75 offsets.len()
76 76 } else {
77 77 self.bytes.len() / INDEX_ENTRY_SIZE
78 78 }
79 79 }
80 80
81 81 /// Returns `true` if the `Index` has zero `entries`.
82 82 pub fn is_empty(&self) -> bool {
83 83 self.len() == 0
84 84 }
85 85
86 86 /// Return the index entry corresponding to the given revision if it
87 87 /// exists.
88 88 pub fn get_entry(&self, rev: Revision) -> Option<IndexEntry> {
89 89 if rev == NULL_REVISION {
90 90 return None;
91 91 }
92 92 if let Some(offsets) = &self.offsets {
93 93 self.get_entry_inline(rev, offsets)
94 94 } else {
95 95 self.get_entry_separated(rev)
96 96 }
97 97 }
98 98
99 99 fn get_entry_inline(
100 100 &self,
101 101 rev: Revision,
102 102 offsets: &[usize],
103 103 ) -> Option<IndexEntry> {
104 104 let start = *offsets.get(rev as usize)?;
105 105 let end = start.checked_add(INDEX_ENTRY_SIZE)?;
106 106 let bytes = &self.bytes[start..end];
107 107
108 108 // See IndexEntry for an explanation of this override.
109 109 let offset_override = Some(end);
110 110
111 111 Some(IndexEntry {
112 112 bytes,
113 113 offset_override,
114 114 })
115 115 }
116 116
117 117 fn get_entry_separated(&self, rev: Revision) -> Option<IndexEntry> {
118 118 let max_rev = self.bytes.len() / INDEX_ENTRY_SIZE;
119 119 if rev as usize >= max_rev {
120 120 return None;
121 121 }
122 122 let start = rev as usize * INDEX_ENTRY_SIZE;
123 123 let end = start + INDEX_ENTRY_SIZE;
124 124 let bytes = &self.bytes[start..end];
125 125
126 126 // Override the offset of the first revision as its bytes are used
127 127 // for the index's metadata (saving space because it is always 0)
128 128 let offset_override = if rev == 0 { Some(0) } else { None };
129 129
130 130 Some(IndexEntry {
131 131 bytes,
132 132 offset_override,
133 133 })
134 134 }
135 135 }
136 136
137 137 impl super::RevlogIndex for Index {
138 138 fn len(&self) -> usize {
139 139 self.len()
140 140 }
141 141
142 142 fn node(&self, rev: Revision) -> Option<&Node> {
143 143 self.get_entry(rev).map(|entry| entry.hash())
144 144 }
145 145 }
146 146
147 147 #[derive(Debug)]
148 148 pub struct IndexEntry<'a> {
149 149 bytes: &'a [u8],
150 150 /// Allows overriding the offset value of the entry.
151 151 ///
152 152 /// For interleaved index and data, the offset stored in the index
153 153 /// corresponds to the separated data offset.
154 154 /// It has to be overridden with the actual offset in the interleaved
155 155 /// index which is just after the index block.
156 156 ///
157 157 /// For separated index and data, the offset stored in the first index
158 158 /// entry is mixed with the index headers.
159 159 /// It has to be overridden with 0.
160 160 offset_override: Option<usize>,
161 161 }
162 162
163 163 impl<'a> IndexEntry<'a> {
164 164 /// Return the offset of the data.
165 165 pub fn offset(&self) -> usize {
166 166 if let Some(offset_override) = self.offset_override {
167 167 offset_override
168 168 } else {
169 169 let mut bytes = [0; 8];
170 170 bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
171 171 BigEndian::read_u64(&bytes[..]) as usize
172 172 }
173 173 }
174 174
175 175 /// Return the compressed length of the data.
176 176 pub fn compressed_len(&self) -> usize {
177 177 BigEndian::read_u32(&self.bytes[8..=11]) as usize
178 178 }
179 179
180 180 /// Return the uncompressed length of the data.
181 181 pub fn uncompressed_len(&self) -> usize {
182 182 BigEndian::read_u32(&self.bytes[12..=15]) as usize
183 183 }
184 184
185 185 /// Return the revision upon which the data has been derived.
186 186 pub fn base_revision(&self) -> Revision {
187 187 // TODO Maybe return an Option when base_revision == rev?
188 188 // Requires to add rev to IndexEntry
189 189
190 190 BigEndian::read_i32(&self.bytes[16..])
191 191 }
192 192
193 193 pub fn p1(&self) -> Revision {
194 194 BigEndian::read_i32(&self.bytes[24..])
195 195 }
196 196
197 197 pub fn p2(&self) -> Revision {
198 198 BigEndian::read_i32(&self.bytes[28..])
199 199 }
200 200
201 201 /// Return the hash of revision's full text.
202 202 ///
203 203 /// Currently, SHA-1 is used and only the first 20 bytes of this field
204 204 /// are used.
205 205 pub fn hash(&self) -> &'a Node {
206 206 (&self.bytes[32..52]).try_into().unwrap()
207 207 }
208 208 }
209 209
210 210 /// Value of the inline flag.
211 211 pub fn is_inline(index_bytes: &[u8]) -> bool {
212 212 match &index_bytes[0..=1] {
213 213 [0, 0] | [0, 2] => false,
214 214 _ => true,
215 215 }
216 216 }
217 217
218 218 #[cfg(test)]
219 219 mod tests {
220 220 use super::*;
221 221
222 222 #[cfg(test)]
223 223 #[derive(Debug, Copy, Clone)]
224 224 pub struct IndexEntryBuilder {
225 225 is_first: bool,
226 226 is_inline: bool,
227 227 is_general_delta: bool,
228 228 version: u16,
229 229 offset: usize,
230 230 compressed_len: usize,
231 231 uncompressed_len: usize,
232 232 base_revision: Revision,
233 233 }
234 234
235 235 #[cfg(test)]
236 236 impl IndexEntryBuilder {
237 237 pub fn new() -> Self {
238 238 Self {
239 239 is_first: false,
240 240 is_inline: false,
241 241 is_general_delta: true,
242 242 version: 2,
243 243 offset: 0,
244 244 compressed_len: 0,
245 245 uncompressed_len: 0,
246 246 base_revision: 0,
247 247 }
248 248 }
249 249
250 250 pub fn is_first(&mut self, value: bool) -> &mut Self {
251 251 self.is_first = value;
252 252 self
253 253 }
254 254
255 255 pub fn with_inline(&mut self, value: bool) -> &mut Self {
256 256 self.is_inline = value;
257 257 self
258 258 }
259 259
260 260 pub fn with_general_delta(&mut self, value: bool) -> &mut Self {
261 261 self.is_general_delta = value;
262 262 self
263 263 }
264 264
265 265 pub fn with_version(&mut self, value: u16) -> &mut Self {
266 266 self.version = value;
267 267 self
268 268 }
269 269
270 270 pub fn with_offset(&mut self, value: usize) -> &mut Self {
271 271 self.offset = value;
272 272 self
273 273 }
274 274
275 275 pub fn with_compressed_len(&mut self, value: usize) -> &mut Self {
276 276 self.compressed_len = value;
277 277 self
278 278 }
279 279
280 280 pub fn with_uncompressed_len(&mut self, value: usize) -> &mut Self {
281 281 self.uncompressed_len = value;
282 282 self
283 283 }
284 284
285 285 pub fn with_base_revision(&mut self, value: Revision) -> &mut Self {
286 286 self.base_revision = value;
287 287 self
288 288 }
289 289
290 290 pub fn build(&self) -> Vec<u8> {
291 291 let mut bytes = Vec::with_capacity(INDEX_ENTRY_SIZE);
292 292 if self.is_first {
293 293 bytes.extend(&match (self.is_general_delta, self.is_inline) {
294 294 (false, false) => [0u8, 0],
295 295 (false, true) => [0u8, 1],
296 296 (true, false) => [0u8, 2],
297 297 (true, true) => [0u8, 3],
298 298 });
299 299 bytes.extend(&self.version.to_be_bytes());
300 300 // Remaining offset bytes.
301 301 bytes.extend(&[0u8; 2]);
302 302 } else {
303 - // Offset is only 6 bytes will usize is 8.
304 - bytes.extend(&self.offset.to_be_bytes()[2..]);
303 + // Offset stored on 48 bits (6 bytes)
304 + bytes.extend(&(self.offset as u64).to_be_bytes()[2..]);
305 305 }
306 306 bytes.extend(&[0u8; 2]); // Revision flags.
307 - bytes.extend(&self.compressed_len.to_be_bytes()[4..]);
308 - bytes.extend(&self.uncompressed_len.to_be_bytes()[4..]);
307 + bytes.extend(&(self.compressed_len as u32).to_be_bytes());
308 + bytes.extend(&(self.uncompressed_len as u32).to_be_bytes());
309 309 bytes.extend(&self.base_revision.to_be_bytes());
310 310 bytes
311 311 }
312 312 }
313 313
314 314 #[test]
315 315 fn is_not_inline_when_no_inline_flag_test() {
316 316 let bytes = IndexEntryBuilder::new()
317 317 .is_first(true)
318 318 .with_general_delta(false)
319 319 .with_inline(false)
320 320 .build();
321 321
322 322 assert_eq!(is_inline(&bytes), false)
323 323 }
324 324
325 325 #[test]
326 326 fn is_inline_when_inline_flag_test() {
327 327 let bytes = IndexEntryBuilder::new()
328 328 .is_first(true)
329 329 .with_general_delta(false)
330 330 .with_inline(true)
331 331 .build();
332 332
333 333 assert_eq!(is_inline(&bytes), true)
334 334 }
335 335
336 336 #[test]
337 337 fn is_inline_when_inline_and_generaldelta_flags_test() {
338 338 let bytes = IndexEntryBuilder::new()
339 339 .is_first(true)
340 340 .with_general_delta(true)
341 341 .with_inline(true)
342 342 .build();
343 343
344 344 assert_eq!(is_inline(&bytes), true)
345 345 }
346 346
347 347 #[test]
348 348 fn test_offset() {
349 349 let bytes = IndexEntryBuilder::new().with_offset(1).build();
350 350 let entry = IndexEntry {
351 351 bytes: &bytes,
352 352 offset_override: None,
353 353 };
354 354
355 355 assert_eq!(entry.offset(), 1)
356 356 }
357 357
358 358 #[test]
359 359 fn test_with_overridden_offset() {
360 360 let bytes = IndexEntryBuilder::new().with_offset(1).build();
361 361 let entry = IndexEntry {
362 362 bytes: &bytes,
363 363 offset_override: Some(2),
364 364 };
365 365
366 366 assert_eq!(entry.offset(), 2)
367 367 }
368 368
369 369 #[test]
370 370 fn test_compressed_len() {
371 371 let bytes = IndexEntryBuilder::new().with_compressed_len(1).build();
372 372 let entry = IndexEntry {
373 373 bytes: &bytes,
374 374 offset_override: None,
375 375 };
376 376
377 377 assert_eq!(entry.compressed_len(), 1)
378 378 }
379 379
380 380 #[test]
381 381 fn test_uncompressed_len() {
382 382 let bytes = IndexEntryBuilder::new().with_uncompressed_len(1).build();
383 383 let entry = IndexEntry {
384 384 bytes: &bytes,
385 385 offset_override: None,
386 386 };
387 387
388 388 assert_eq!(entry.uncompressed_len(), 1)
389 389 }
390 390
391 391 #[test]
392 392 fn test_base_revision() {
393 393 let bytes = IndexEntryBuilder::new().with_base_revision(1).build();
394 394 let entry = IndexEntry {
395 395 bytes: &bytes,
396 396 offset_override: None,
397 397 };
398 398
399 399 assert_eq!(entry.base_revision(), 1)
400 400 }
401 401 }
402 402
403 403 #[cfg(test)]
404 404 pub use tests::IndexEntryBuilder;
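For quick reference, the byte ranges within a 64-byte index entry as read by `IndexEntry` above (inferred from the accessors and the test builder in this file; ranges not accessed here are omitted):

```
 0..6   data offset, 48-bit big-endian (overridden for inline data and for revision 0)
 6..8   revision flags (written as two zero bytes by the test builder)
 8..12  compressed length
12..16  uncompressed length
16..20  base revision
24..28  first parent (p1)
28..32  second parent (p2)
32..52  node hash (the first 20 bytes of the field are used)
```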