@@ -1,2031 +1,2032 @@
|
1 | 1 | use std::collections::{HashMap, HashSet}; |
|
2 | 2 | use std::fmt::Debug; |
|
3 | 3 | use std::ops::Deref; |
|
4 | 4 | use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; |
|
5 | 5 | |
|
6 | 6 | use bitvec::prelude::*; |
|
7 | 7 | use byteorder::{BigEndian, ByteOrder}; |
|
8 | 8 | use bytes_cast::{unaligned, BytesCast}; |
|
9 | 9 | |
|
10 | 10 | use super::REVIDX_KNOWN_FLAGS; |
|
11 | 11 | use crate::errors::HgError; |
|
12 | 12 | use crate::node::{NODE_BYTES_LENGTH, NULL_NODE, STORED_NODE_ID_BYTES}; |
|
13 | 13 | use crate::revlog::node::Node; |
|
14 | 14 | use crate::revlog::{Revision, NULL_REVISION}; |
|
15 | 15 | use crate::{ |
|
16 | 16 | dagops, BaseRevision, FastHashMap, Graph, GraphError, RevlogError, |
|
17 | 17 | RevlogIndex, UncheckedRevision, |
|
18 | 18 | }; |
|
19 | 19 | |
|
20 | 20 | pub const INDEX_ENTRY_SIZE: usize = 64; |
|
21 | 21 | pub const INDEX_HEADER_SIZE: usize = 4; |
|
22 | 22 | pub const COMPRESSION_MODE_INLINE: u8 = 2; |
|
23 | 23 | |
|
24 | 24 | #[derive(Debug)] |
|
25 | 25 | pub struct IndexHeader { |
|
26 | 26 | pub(super) header_bytes: [u8; INDEX_HEADER_SIZE], |
|
27 | 27 | } |
|
28 | 28 | |
|
29 | 29 | #[derive(Copy, Clone, Debug)] |
|
30 | 30 | pub struct IndexHeaderFlags { |
|
31 | 31 | flags: u16, |
|
32 | 32 | } |
|
33 | 33 | |
|
34 | 34 | /// Corresponds to the high bits of `_format_flags` in python |
|
35 | 35 | impl IndexHeaderFlags { |
|
36 | 36 | /// Corresponds to FLAG_INLINE_DATA in python |
|
37 | 37 | pub fn is_inline(self) -> bool { |
|
38 | 38 | self.flags & 1 != 0 |
|
39 | 39 | } |
|
40 | 40 | /// Corresponds to FLAG_GENERALDELTA in python |
|
41 | 41 | pub fn uses_generaldelta(self) -> bool { |
|
42 | 42 | self.flags & 2 != 0 |
|
43 | 43 | } |
|
44 | 44 | } |
|
45 | 45 | |
|
46 | 46 | /// Corresponds to the INDEX_HEADER structure, |
|
47 | 47 | /// which is parsed as a `header` variable in `_loadindex` in `revlog.py` |
|
48 | 48 | impl IndexHeader { |
|
49 | 49 | fn format_flags(&self) -> IndexHeaderFlags { |
|
50 | 50 | // No "unknown flags" check here, unlike in python. Maybe there should |
|
51 | 51 | // be. |
|
52 | 52 | IndexHeaderFlags { |
|
53 | 53 | flags: BigEndian::read_u16(&self.header_bytes[0..2]), |
|
54 | 54 | } |
|
55 | 55 | } |
|
56 | 56 | |
|
57 | 57 | /// The only revlog version currently supported by rhg. |
|
58 | 58 | const REVLOGV1: u16 = 1; |
|
59 | 59 | |
|
60 | 60 | /// Corresponds to `_format_version` in Python. |
|
61 | 61 | fn format_version(&self) -> u16 { |
|
62 | 62 | BigEndian::read_u16(&self.header_bytes[2..4]) |
|
63 | 63 | } |
|
64 | 64 | |
|
65 | 65 | pub fn parse(index_bytes: &[u8]) -> Result<Option<IndexHeader>, HgError> { |
|
66 | 66 | if index_bytes.is_empty() { |
|
67 | 67 | return Ok(None); |
|
68 | 68 | } |
|
69 | 69 | if index_bytes.len() < 4 { |
|
70 | 70 | return Err(HgError::corrupted( |
|
71 | 71 | "corrupted revlog: can't read the index format header", |
|
72 | 72 | )); |
|
73 | 73 | } |
|
74 | 74 | Ok(Some(IndexHeader { |
|
75 | 75 | header_bytes: { |
|
76 | 76 | let bytes: [u8; 4] = |
|
77 | 77 | index_bytes[0..4].try_into().expect("impossible"); |
|
78 | 78 | bytes |
|
79 | 79 | }, |
|
80 | 80 | })) |
|
81 | 81 | } |
|
82 | 82 | } |
|
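Note: the 4-byte header packs the format flags into its first two bytes and the format version into the last two, both big-endian. A minimal standalone sketch of that layout (`decode_header` is an illustrative helper, not part of this module):

    fn decode_header(header: [u8; 4]) -> (u16, u16) {
        // high 16 bits: format flags; low 16 bits: format version
        let flags = u16::from_be_bytes([header[0], header[1]]);
        let version = u16::from_be_bytes([header[2], header[3]]);
        (flags, version)
    }

    fn main() {
        // 0x0003 = inline data (bit 0) + generaldelta (bit 1), version 1
        let (flags, version) = decode_header([0x00, 0x03, 0x00, 0x01]);
        assert!(flags & 1 != 0 && flags & 2 != 0);
        assert_eq!(version, 1);
    }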
83 | 83 | |
|
84 | 84 | /// Abstracts the access to the index bytes since they can be spread between |
|
85 | 85 | /// the immutable (bytes) part and the mutable (added) part if any appends |
|
86 | 86 | /// happened. This makes it transparent for the callers. |
|
87 | 87 | struct IndexData { |
|
88 | 88 | /// Immutable bytes, most likely taken from disk |
|
89 | 89 | bytes: Box<dyn Deref<Target = [u8]> + Send + Sync>, |
|
90 | 90 | /// Used when stripping index contents, keeps track of the start of the |
|
91 | 91 | /// first stripped revision, which is used to give a slice of the |
|
92 | 92 | /// `bytes` field. |
|
93 | 93 | truncation: Option<usize>, |
|
94 | 94 | /// Bytes that were added after reading the index |
|
95 | 95 | added: Vec<u8>, |
|
96 | 96 | first_entry: [u8; INDEX_ENTRY_SIZE], |
|
97 | 97 | } |
|
98 | 98 | |
|
99 | 99 | impl IndexData { |
|
100 | 100 | pub fn new(bytes: Box<dyn Deref<Target = [u8]> + Send + Sync>) -> Self { |
|
101 | 101 | let mut first_entry = [0; INDEX_ENTRY_SIZE]; |
|
102 | 102 | if bytes.len() >= INDEX_ENTRY_SIZE { |
|
103 | 103 | first_entry[INDEX_HEADER_SIZE..] |
|
104 | 104 | .copy_from_slice(&bytes[INDEX_HEADER_SIZE..INDEX_ENTRY_SIZE]) |
|
105 | 105 | } |
|
106 | 106 | Self { |
|
107 | 107 | bytes, |
|
108 | 108 | truncation: None, |
|
109 | 109 | added: vec![], |
|
110 | 110 | first_entry, |
|
111 | 111 | } |
|
112 | 112 | } |
|
113 | 113 | |
|
114 | 114 | pub fn len(&self) -> usize { |
|
115 | 115 | match self.truncation { |
|
116 | 116 | Some(truncation) => truncation + self.added.len(), |
|
117 | 117 | None => self.bytes.len() + self.added.len(), |
|
118 | 118 | } |
|
119 | 119 | } |
|
120 | 120 | |
|
121 | 121 | fn remove( |
|
122 | 122 | &mut self, |
|
123 | 123 | rev: Revision, |
|
124 | 124 | offsets: Option<&[usize]>, |
|
125 | 125 | ) -> Result<(), RevlogError> { |
|
126 | 126 | let rev = rev.0 as usize; |
|
127 | 127 | let truncation = if let Some(offsets) = offsets { |
|
128 | 128 | offsets[rev] |
|
129 | 129 | } else { |
|
130 | 130 | rev * INDEX_ENTRY_SIZE |
|
131 | 131 | }; |
|
132 | 132 | if truncation < self.bytes.len() { |
|
133 | 133 | self.truncation = Some(truncation); |
|
134 | 134 | self.added.clear(); |
|
135 | 135 | } else { |
|
136 | 136 | self.added.truncate(truncation - self.bytes.len()); |
|
137 | 137 | } |
|
138 | 138 | Ok(()) |
|
139 | 139 | } |
|
140 | 140 | |
|
141 | 141 | fn is_new(&self) -> bool { |
|
142 | 142 | self.bytes.is_empty() |
|
143 | 143 | } |
|
144 | 144 | } |
|
145 | 145 | |
|
146 | 146 | impl std::ops::Index<std::ops::Range<usize>> for IndexData { |
|
147 | 147 | type Output = [u8]; |
|
148 | 148 | |
|
149 | 149 | fn index(&self, index: std::ops::Range<usize>) -> &Self::Output { |
|
150 | 150 | let start = index.start; |
|
151 | 151 | let end = index.end; |
|
152 | 152 | let immutable_len = match self.truncation { |
|
153 | 153 | Some(truncation) => truncation, |
|
154 | 154 | None => self.bytes.len(), |
|
155 | 155 | }; |
|
156 | 156 | if start < immutable_len { |
|
157 | 157 | if end > immutable_len { |
|
158 | 158 | panic!("index data cannot span existing and added ranges"); |
|
159 | 159 | } |
|
160 | 160 | &self.bytes[index] |
|
161 | 161 | } else { |
|
162 | 162 | &self.added[start - immutable_len..end - immutable_len] |
|
163 | 163 | } |
|
164 | 164 | } |
|
165 | 165 | } |
|
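Note: the `Index` impl above deliberately panics when a requested range straddles the boundary between the on-disk bytes and the appended bytes. A simplified model of that two-region indexing (hypothetical type, for illustration only):

    struct SplitBuf {
        immutable: Vec<u8>, // stands in for `bytes` (possibly truncated)
        added: Vec<u8>,     // stands in for `added`
    }

    impl SplitBuf {
        fn slice(&self, start: usize, end: usize) -> &[u8] {
            if start < self.immutable.len() {
                assert!(end <= self.immutable.len(), "cannot span both regions");
                &self.immutable[start..end]
            } else {
                let base = self.immutable.len();
                &self.added[start - base..end - base]
            }
        }
    }

    fn main() {
        let buf = SplitBuf { immutable: vec![1u8, 2, 3], added: vec![4, 5] };
        assert_eq!(buf.slice(1, 3), &[2, 3]);
        assert_eq!(buf.slice(3, 5), &[4, 5]);
    }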
166 | 166 | |
|
167 | 167 | #[derive(Debug, PartialEq, Eq)] |
|
168 | 168 | pub struct RevisionDataParams { |
|
169 | 169 | pub flags: u16, |
|
170 | 170 | pub data_offset: u64, |
|
171 | 171 | pub data_compressed_length: i32, |
|
172 | 172 | pub data_uncompressed_length: i32, |
|
173 | 173 | pub data_delta_base: i32, |
|
174 | 174 | pub link_rev: i32, |
|
175 | 175 | pub parent_rev_1: i32, |
|
176 | 176 | pub parent_rev_2: i32, |
|
177 | 177 | pub node_id: [u8; NODE_BYTES_LENGTH], |
|
178 | 178 | pub _sidedata_offset: u64, |
|
179 | 179 | pub _sidedata_compressed_length: i32, |
|
180 | 180 | pub data_compression_mode: u8, |
|
181 | 181 | pub _sidedata_compression_mode: u8, |
|
182 | 182 | pub _rank: i32, |
|
183 | 183 | } |
|
184 | 184 | |
|
185 | 185 | impl Default for RevisionDataParams { |
|
186 | 186 | fn default() -> Self { |
|
187 | 187 | Self { |
|
188 | 188 | flags: 0, |
|
189 | 189 | data_offset: 0, |
|
190 | 190 | data_compressed_length: 0, |
|
191 | 191 | data_uncompressed_length: 0, |
|
192 | 192 | data_delta_base: -1, |
|
193 | 193 | link_rev: -1, |
|
194 | 194 | parent_rev_1: -1, |
|
195 | 195 | parent_rev_2: -1, |
|
196 | 196 | node_id: [0; NODE_BYTES_LENGTH], |
|
197 | 197 | _sidedata_offset: 0, |
|
198 | 198 | _sidedata_compressed_length: 0, |
|
199 | 199 | data_compression_mode: COMPRESSION_MODE_INLINE, |
|
200 | 200 | _sidedata_compression_mode: COMPRESSION_MODE_INLINE, |
|
201 | 201 | _rank: -1, |
|
202 | 202 | } |
|
203 | 203 | } |
|
204 | 204 | } |
|
205 | 205 | |
|
206 | 206 | #[derive(BytesCast)] |
|
207 | 207 | #[repr(C)] |
|
208 | 208 | pub struct RevisionDataV1 { |
|
209 | 209 | data_offset_or_flags: unaligned::U64Be, |
|
210 | 210 | data_compressed_length: unaligned::I32Be, |
|
211 | 211 | data_uncompressed_length: unaligned::I32Be, |
|
212 | 212 | data_delta_base: unaligned::I32Be, |
|
213 | 213 | link_rev: unaligned::I32Be, |
|
214 | 214 | parent_rev_1: unaligned::I32Be, |
|
215 | 215 | parent_rev_2: unaligned::I32Be, |
|
216 | 216 | node_id: [u8; STORED_NODE_ID_BYTES], |
|
217 | 217 | } |
|
218 | 218 | |
|
219 | 219 | fn _static_assert_size_of_revision_data_v1() { |
|
220 | 220 | let _ = std::mem::transmute::<RevisionDataV1, [u8; 64]>; |
|
221 | 221 | } |
|
222 | 222 | |
|
223 | 223 | impl RevisionDataParams { |
|
224 | 224 | pub fn validate(&self) -> Result<(), RevlogError> { |
|
225 | 225 | if self.flags & !REVIDX_KNOWN_FLAGS != 0 { |
|
226 | 226 | return Err(RevlogError::corrupted(format!( |
|
227 | 227 | "unknown revlog index flags: {}", |
|
228 | 228 | self.flags |
|
229 | 229 | ))); |
|
230 | 230 | } |
|
231 | 231 | if self.data_compression_mode != COMPRESSION_MODE_INLINE { |
|
232 | 232 | return Err(RevlogError::corrupted(format!( |
|
233 | 233 | "invalid data compression mode: {}", |
|
234 | 234 | self.data_compression_mode |
|
235 | 235 | ))); |
|
236 | 236 | } |
|
237 | 237 | // FIXME isn't this only for v2 or changelog v2? |
|
238 | 238 | if self._sidedata_compression_mode != COMPRESSION_MODE_INLINE { |
|
239 | 239 | return Err(RevlogError::corrupted(format!( |
|
240 | 240 | "invalid sidedata compression mode: {}", |
|
241 | 241 | self._sidedata_compression_mode |
|
242 | 242 | ))); |
|
243 | 243 | } |
|
244 | 244 | Ok(()) |
|
245 | 245 | } |
|
246 | 246 | |
|
247 | 247 | pub fn into_v1(self) -> RevisionDataV1 { |
|
248 | 248 | let data_offset_or_flags = self.data_offset << 16 | self.flags as u64; |
|
249 | 249 | let mut node_id = [0; STORED_NODE_ID_BYTES]; |
|
250 | 250 | node_id[..NODE_BYTES_LENGTH].copy_from_slice(&self.node_id); |
|
251 | 251 | RevisionDataV1 { |
|
252 | 252 | data_offset_or_flags: data_offset_or_flags.into(), |
|
253 | 253 | data_compressed_length: self.data_compressed_length.into(), |
|
254 | 254 | data_uncompressed_length: self.data_uncompressed_length.into(), |
|
255 | 255 | data_delta_base: self.data_delta_base.into(), |
|
256 | 256 | link_rev: self.link_rev.into(), |
|
257 | 257 | parent_rev_1: self.parent_rev_1.into(), |
|
258 | 258 | parent_rev_2: self.parent_rev_2.into(), |
|
259 | 259 | node_id, |
|
260 | 260 | } |
|
261 | 261 | } |
|
262 | 262 | } |
|
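Note: `into_v1` packs the 48-bit data offset and the 16-bit flags into the single big-endian 8-byte word at the start of each entry (for rev 0 the first four of those bytes are overlaid by the index header, which is why other code special-cases rev 0). A quick round-trip check with hypothetical helper names:

    fn pack(offset: u64, flags: u16) -> u64 {
        offset << 16 | flags as u64
    }

    fn unpack(word: u64) -> (u64, u16) {
        (word >> 16, (word & 0xFFFF) as u16)
    }

    fn main() {
        let word = pack(0xABCDEF, 0x0001);
        assert_eq!(unpack(word), (0xABCDEF, 0x0001));
    }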
263 | 263 | |
|
264 | 264 | /// A Revlog index |
|
265 | 265 | pub struct Index { |
|
266 | 266 | bytes: IndexData, |
|
267 | 267 | /// Offsets of starts of index blocks. |
|
268 | 268 | /// Only needed when the index is interleaved with data. |
|
269 | 269 | offsets: RwLock<Option<Vec<usize>>>, |
|
270 | 270 | uses_generaldelta: bool, |
|
271 | 271 | is_inline: bool, |
|
272 | 272 | /// Cache of (head_revisions, filtered_revisions) |
|
273 | 273 | /// |
|
274 | 274 | /// The head revisions in this index, kept in sync. Should |
|
275 | 275 | /// be accessed via the [`Self::head_revs`] method. |
|
276 | 276 | /// The last filtered revisions in this index, used to make sure |
|
277 | 277 | /// we haven't changed filters when returning the cached `head_revs`. |
|
278 | 278 | head_revs: RwLock<(Vec<Revision>, HashSet<Revision>)>, |
|
279 | 279 | } |
|
280 | 280 | |
|
281 | 281 | impl Debug for Index { |
|
282 | 282 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { |
|
283 | 283 | f.debug_struct("Index") |
|
284 | 284 | .field("offsets", &self.offsets) |
|
285 | 285 | .field("uses_generaldelta", &self.uses_generaldelta) |
|
286 | 286 | .finish() |
|
287 | 287 | } |
|
288 | 288 | } |
|
289 | 289 | |
|
290 | 290 | impl Graph for Index { |
|
291 | 291 | #[inline(always)] |
|
292 | 292 | fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> { |
|
293 | 293 | let err = || GraphError::ParentOutOfRange(rev); |
|
294 | 294 | match self.get_entry(rev) { |
|
295 | 295 | Some(entry) => { |
|
296 | 296 | // The C implementation checks that the parents are valid |
|
297 | 297 | // before returning |
|
298 | 298 | Ok([ |
|
299 | 299 | self.check_revision(entry.p1()).ok_or_else(err)?, |
|
300 | 300 | self.check_revision(entry.p2()).ok_or_else(err)?, |
|
301 | 301 | ]) |
|
302 | 302 | } |
|
303 | 303 | None => Ok([NULL_REVISION, NULL_REVISION]), |
|
304 | 304 | } |
|
305 | 305 | } |
|
306 | 306 | } |
|
307 | 307 | |
|
308 | 308 | /// A cache suitable for find_snapshots |
|
309 | 309 | /// |
|
310 | 310 | /// Logically equivalent to a mapping whose keys are [`BaseRevision`] and |
|
311 | 311 | /// values sets of [`BaseRevision`] |
|
312 | 312 | /// |
|
313 | 313 | /// TODO the dubious part is insisting that errors must be RevlogError |
|
314 | 314 | /// we would probably need to sprinkle some magic here, such as an associated |
|
315 | 315 | /// type that would be Into<RevlogError> but even that would not be |
|
316 | 316 | /// satisfactory, as errors potentially have nothing to do with the revlog. |
|
317 | 317 | pub trait SnapshotsCache { |
|
318 | 318 | fn insert_for( |
|
319 | 319 | &mut self, |
|
320 | 320 | rev: BaseRevision, |
|
321 | 321 | value: BaseRevision, |
|
322 | 322 | ) -> Result<(), RevlogError>; |
|
323 | 323 | } |
|
324 | 324 | |
|
325 | 325 | impl SnapshotsCache for FastHashMap<BaseRevision, HashSet<BaseRevision>> { |
|
326 | 326 | fn insert_for( |
|
327 | 327 | &mut self, |
|
328 | 328 | rev: BaseRevision, |
|
329 | 329 | value: BaseRevision, |
|
330 | 330 | ) -> Result<(), RevlogError> { |
|
331 | 331 | let all_values = self.entry(rev).or_default(); |
|
332 | 332 | all_values.insert(value); |
|
333 | 333 | Ok(()) |
|
334 | 334 | } |
|
335 | 335 | } |
|
336 | 336 | |
|
337 | 337 | impl Index { |
|
338 | 338 | /// Create an index from bytes. |
|
339 | 339 | /// Calculate the start of each entry when is_inline is true. |
|
340 | 340 | pub fn new( |
|
341 | 341 | bytes: Box<dyn Deref<Target = [u8]> + Send + Sync>, |
|
342 | 342 | default_header: IndexHeader, |
|
343 | 343 | ) -> Result<Self, HgError> { |
|
344 | 344 | let header = |
|
345 | 345 | IndexHeader::parse(bytes.as_ref())?.unwrap_or(default_header); |
|
346 | 346 | |
|
347 | 347 | if header.format_version() != IndexHeader::REVLOGV1 { |
|
348 | 348 | // A proper new version should have had a repo/store |
|
349 | 349 | // requirement. |
|
350 | 350 | return Err(HgError::corrupted("unsupported revlog version")); |
|
351 | 351 | } |
|
352 | 352 | |
|
353 | 353 | let uses_generaldelta = header.format_flags().uses_generaldelta(); |
|
354 | 354 | |
|
355 | 355 | if header.format_flags().is_inline() { |
|
356 | 356 | let mut offset: usize = 0; |
|
357 | 357 | let mut offsets = Vec::new(); |
|
358 | 358 | |
|
359 | 359 | while offset + INDEX_ENTRY_SIZE <= bytes.len() { |
|
360 | 360 | offsets.push(offset); |
|
361 | 361 | let end = offset + INDEX_ENTRY_SIZE; |
|
362 | 362 | let entry = IndexEntry { |
|
363 | 363 | bytes: &bytes[offset..end], |
|
364 | 364 | }; |
|
365 | 365 | |
|
366 | 366 | offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize; |
|
367 | 367 | } |
|
368 | 368 | |
|
369 | 369 | if offset == bytes.len() { |
|
370 | 370 | Ok(Self { |
|
371 | 371 | bytes: IndexData::new(bytes), |
|
372 | 372 | offsets: RwLock::new(Some(offsets)), |
|
373 | 373 | uses_generaldelta, |
|
374 | 374 | is_inline: true, |
|
375 | 375 | head_revs: RwLock::new((vec![], HashSet::new())), |
|
376 | 376 | }) |
|
377 | 377 | } else { |
|
378 | 378 | Err(HgError::corrupted("unexpected inline revlog length")) |
|
379 | 379 | } |
|
380 | 380 | } else { |
|
381 | 381 | Ok(Self { |
|
382 | 382 | bytes: IndexData::new(bytes), |
|
383 | 383 | offsets: RwLock::new(None), |
|
384 | 384 | uses_generaldelta, |
|
385 | 385 | is_inline: false, |
|
386 | 386 | head_revs: RwLock::new((vec![], HashSet::new())), |
|
387 | 387 | }) |
|
388 | 388 | } |
|
389 | 389 | } |
|
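Note: in the inline layout scanned above, each 64-byte entry is immediately followed by that revision's compressed data, so the cursor advances by `INDEX_ENTRY_SIZE + compressed_len` and must land exactly on the end of the buffer. A standalone sketch of the same walk (the closure stands in for reading `compressed_len` out of the entry bytes):

    const ENTRY_SIZE: usize = 64;

    fn scan_inline(data: &[u8], compressed_len_at: impl Fn(usize) -> usize) -> Vec<usize> {
        let mut offsets = Vec::new();
        let mut offset = 0;
        while offset + ENTRY_SIZE <= data.len() {
            offsets.push(offset);
            offset += ENTRY_SIZE + compressed_len_at(offset);
        }
        offsets
    }

    fn main() {
        // Two entries whose compressed payloads are 10 and 0 bytes long.
        let data = vec![0u8; ENTRY_SIZE + 10 + ENTRY_SIZE];
        let offsets = scan_inline(&data, |offset| if offset == 0 { 10 } else { 0 });
        assert_eq!(offsets, vec![0, ENTRY_SIZE + 10]);
    }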
390 | 390 | |
|
391 | 391 | pub fn uses_generaldelta(&self) -> bool { |
|
392 | 392 | self.uses_generaldelta |
|
393 | 393 | } |
|
394 | 394 | |
|
395 | 395 | /// Value of the inline flag. |
|
396 | 396 | pub fn is_inline(&self) -> bool { |
|
397 | 397 | self.is_inline |
|
398 | 398 | } |
|
399 | 399 | |
|
400 | 400 | /// Return a slice of bytes if `revlog` is inline. Panic if not. |
|
401 | 401 | pub fn data(&self, start: usize, end: usize) -> &[u8] { |
|
402 | 402 | if !self.is_inline() { |
|
403 | 403 | panic!("tried to access data in the index of a revlog that is not inline"); |
|
404 | 404 | } |
|
405 | 405 | &self.bytes[start..end] |
|
406 | 406 | } |
|
407 | 407 | |
|
408 | 408 | /// Return number of entries of the revlog index. |
|
409 | 409 | pub fn len(&self) -> usize { |
|
410 | 410 | if self.is_inline() { |
|
411 | 411 | (*self.get_offsets()) |
|
412 | 412 | .as_ref() |
|
413 | 413 | .expect("inline should have offsets") |
|
414 | 414 | .len() |
|
415 | 415 | } else { |
|
416 | 416 | self.bytes.len() / INDEX_ENTRY_SIZE |
|
417 | 417 | } |
|
418 | 418 | } |
|
419 | 419 | |
|
420 | 420 | pub fn get_offsets(&self) -> RwLockReadGuard<Option<Vec<usize>>> { |
|
421 | 421 | assert!(self.is_inline()); |
|
422 | 422 | { |
|
423 | 423 | // Wrap in a block to drop the write guard |
|
424 | 424 | let mut offsets = self.offsets.write().unwrap(); |
|
425 | 425 | if offsets.is_none() { |
|
426 | 426 | offsets.replace(inline_scan(&self.bytes.bytes).1); |
|
427 | 427 | } |
|
428 | 428 | } |
|
429 | 429 | self.offsets.read().unwrap() |
|
430 | 430 | } |
|
431 | 431 | |
|
432 | 432 | pub fn get_offsets_mut(&mut self) -> RwLockWriteGuard<Option<Vec<usize>>> { |
|
433 | 433 | assert!(self.is_inline()); |
|
434 | 434 | let mut offsets = self.offsets.write().unwrap(); |
|
435 | 435 | if offsets.is_none() { |
|
436 | 436 | offsets.replace(inline_scan(&self.bytes.bytes).1); |
|
437 | 437 | } |
|
438 | 438 | offsets |
|
439 | 439 | } |
|
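Note: `get_offsets` and `get_offsets_mut` share a lazy-initialization shape: populate the cache under a write lock, and, for the read variant, drop the write guard (the inner block) before taking the read lock. A distilled sketch of that pattern (the vector literal stands in for `inline_scan`):

    use std::sync::{RwLock, RwLockReadGuard};

    fn lazy_get(cell: &RwLock<Option<Vec<usize>>>) -> RwLockReadGuard<'_, Option<Vec<usize>>> {
        {
            let mut guard = cell.write().unwrap();
            if guard.is_none() {
                guard.replace(vec![0, 64, 128]); // stand-in for inline_scan()
            }
        } // the write guard must be dropped here, or read() below would deadlock
        cell.read().unwrap()
    }

    fn main() {
        let cell = RwLock::new(None);
        assert_eq!(lazy_get(&cell).as_deref(), Some(&[0usize, 64, 128][..]));
    }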
440 | 440 | |
|
441 | 441 | /// Returns `true` if the `Index` has zero entries. |
|
442 | 442 | pub fn is_empty(&self) -> bool { |
|
443 | 443 | self.len() == 0 |
|
444 | 444 | } |
|
445 | 445 | |
|
446 | 446 | /// Return the index entry corresponding to the given revision or `None` |
|
447 | 447 | /// for [`NULL_REVISION`] |
|
448 | 448 | /// |
|
449 | 449 | /// Since the specified revision is of the checked type, it always exists |
|
450 | 450 | /// if it was validated by this index. |
|
| 451 | #[inline(always)] |
|
451 | 452 | pub fn get_entry(&self, rev: Revision) -> Option<IndexEntry> { |
|
452 | 453 | if rev == NULL_REVISION { |
|
453 | 454 | return None; |
|
454 | 455 | } |
|
455 | 456 | if rev.0 == 0 { |
|
456 | 457 | Some(IndexEntry { |
|
457 | 458 | bytes: &self.bytes.first_entry[..], |
|
458 | 459 | }) |
|
459 | 460 | } else { |
|
460 | 461 | Some(if self.is_inline() { |
|
461 | 462 | self.get_entry_inline(rev) |
|
462 | 463 | } else { |
|
463 | 464 | self.get_entry_separated(rev) |
|
464 | 465 | }) |
|
465 | 466 | } |
|
466 | 467 | } |
|
467 | 468 | |
|
468 | 469 | /// Return the binary content of the index entry for the given revision |
|
469 | 470 | /// |
|
470 | 471 | /// See [get_entry()](`Self::get_entry()`) for cases when `None` is |
|
471 | 472 | /// returned. |
|
472 | 473 | pub fn entry_binary(&self, rev: Revision) -> Option<&[u8]> { |
|
473 | 474 | self.get_entry(rev).map(|e| { |
|
474 | 475 | let bytes = e.as_bytes(); |
|
475 | 476 | if rev.0 == 0 { |
|
476 | 477 | &bytes[4..] |
|
477 | 478 | } else { |
|
478 | 479 | bytes |
|
479 | 480 | } |
|
480 | 481 | }) |
|
481 | 482 | } |
|
482 | 483 | |
|
483 | 484 | pub fn entry_as_params( |
|
484 | 485 | &self, |
|
485 | 486 | rev: UncheckedRevision, |
|
486 | 487 | ) -> Option<RevisionDataParams> { |
|
487 | 488 | let rev = self.check_revision(rev)?; |
|
488 | 489 | self.get_entry(rev).map(|e| RevisionDataParams { |
|
489 | 490 | flags: e.flags(), |
|
490 | 491 | data_offset: if rev.0 == 0 && !self.bytes.is_new() { |
|
491 | 492 | e.flags() as u64 |
|
492 | 493 | } else { |
|
493 | 494 | e.raw_offset() |
|
494 | 495 | }, |
|
495 | 496 | data_compressed_length: e |
|
496 | 497 | .compressed_len() |
|
497 | 498 | .try_into() |
|
498 | 499 | .unwrap_or_else(|_| { |
|
499 | 500 | // Python's `unionrepo` sets the compressed length to be |
|
500 | 501 | // `-1` (or `u32::MAX` if transmuted to `u32`) because it |
|
501 | 502 | // cannot know the correct compressed length of a given |
|
502 | 503 | // revision. I'm not sure if this is true, but having this |
|
503 | 504 | // edge case won't hurt other use cases, let's handle it. |
|
504 | 505 | assert_eq!(e.compressed_len(), u32::MAX); |
|
505 | 506 | NULL_REVISION.0 |
|
506 | 507 | }), |
|
507 | 508 | data_uncompressed_length: e.uncompressed_len(), |
|
508 | 509 | data_delta_base: e.base_revision_or_base_of_delta_chain().0, |
|
509 | 510 | link_rev: e.link_revision().0, |
|
510 | 511 | parent_rev_1: e.p1().0, |
|
511 | 512 | parent_rev_2: e.p2().0, |
|
512 | 513 | node_id: e.hash().as_bytes().try_into().unwrap(), |
|
513 | 514 | ..Default::default() |
|
514 | 515 | }) |
|
515 | 516 | } |
|
516 | 517 | |
|
517 | 518 | fn get_entry_inline(&self, rev: Revision) -> IndexEntry { |
|
518 | 519 | let offsets = &self.get_offsets(); |
|
519 | 520 | let offsets = offsets.as_ref().expect("inline should have offsets"); |
|
520 | 521 | let start = offsets[rev.0 as usize]; |
|
521 | 522 | let end = start + INDEX_ENTRY_SIZE; |
|
522 | 523 | let bytes = &self.bytes[start..end]; |
|
523 | 524 | |
|
524 | 525 | IndexEntry { bytes } |
|
525 | 526 | } |
|
526 | 527 | |
|
527 | 528 | fn get_entry_separated(&self, rev: Revision) -> IndexEntry { |
|
528 | 529 | let start = rev.0 as usize * INDEX_ENTRY_SIZE; |
|
529 | 530 | let end = start + INDEX_ENTRY_SIZE; |
|
530 | 531 | let bytes = &self.bytes[start..end]; |
|
531 | 532 | |
|
532 | 533 | IndexEntry { bytes } |
|
533 | 534 | } |
|
534 | 535 | |
|
535 | 536 | fn null_entry(&self) -> IndexEntry { |
|
536 | 537 | IndexEntry { |
|
537 | 538 | bytes: &[0; INDEX_ENTRY_SIZE], |
|
538 | 539 | } |
|
539 | 540 | } |
|
540 | 541 | |
|
541 | 542 | /// Return the head revisions of this index |
|
542 | 543 | pub fn head_revs(&self) -> Result<Vec<Revision>, GraphError> { |
|
543 | 544 | self.head_revs_filtered(&HashSet::new(), false) |
|
544 | 545 | .map(|h| h.unwrap()) |
|
545 | 546 | } |
|
546 | 547 | |
|
547 | 548 | /// Python-specific shortcut to save on PyList creation |
|
548 | 549 | pub fn head_revs_shortcut( |
|
549 | 550 | &self, |
|
550 | 551 | ) -> Result<Option<Vec<Revision>>, GraphError> { |
|
551 | 552 | self.head_revs_filtered(&HashSet::new(), true) |
|
552 | 553 | } |
|
553 | 554 | |
|
554 | 555 | /// Return the heads removed and added by advancing from `begin` to `end`. |
|
555 | 556 | /// In revset language, we compute: |
|
556 | 557 | /// - `heads(:begin)-heads(:end)` |
|
557 | 558 | /// - `heads(:end)-heads(:begin)` |
|
558 | 559 | pub fn head_revs_diff( |
|
559 | 560 | &self, |
|
560 | 561 | begin: Revision, |
|
561 | 562 | end: Revision, |
|
562 | 563 | ) -> Result<(Vec<Revision>, Vec<Revision>), GraphError> { |
|
563 | 564 | let mut heads_added = vec![]; |
|
564 | 565 | let mut heads_removed = vec![]; |
|
565 | 566 | |
|
566 | 567 | let mut acc = HashSet::new(); |
|
567 | 568 | let Revision(begin) = begin; |
|
568 | 569 | let Revision(end) = end; |
|
569 | 570 | let mut i = end; |
|
570 | 571 | |
|
571 | 572 | while i > begin { |
|
572 | 573 | // acc invariant: |
|
573 | 574 | // `j` is in the set iff `j <= i` and it has children |
|
574 | 575 | // among `i+1..end` (inclusive) |
|
575 | 576 | if !acc.remove(&i) { |
|
576 | 577 | heads_added.push(Revision(i)); |
|
577 | 578 | } |
|
578 | 579 | for Revision(parent) in self.parents(Revision(i))? { |
|
579 | 580 | acc.insert(parent); |
|
580 | 581 | } |
|
581 | 582 | i -= 1; |
|
582 | 583 | } |
|
583 | 584 | |
|
584 | 585 | // At this point `acc` contains old revisions that gained new children. |
|
585 | 586 | // We need to check if they had any children before. If not, those |
|
586 | 587 | // revisions are the removed heads. |
|
587 | 588 | while !acc.is_empty() { |
|
588 | 589 | // acc invariant: |
|
589 | 590 | // `j` is in the set iff `j <= i` and it has children |
|
590 | 591 | // among `begin+1..end`, but not among `i+1..begin` (inclusive) |
|
591 | 592 | |
|
592 | 593 | assert!(i >= -1); // yes, `-1` can also be a head if the repo is empty |
|
593 | 594 | if acc.remove(&i) { |
|
594 | 595 | heads_removed.push(Revision(i)); |
|
595 | 596 | } |
|
596 | 597 | for Revision(parent) in self.parents(Revision(i))? { |
|
597 | 598 | acc.remove(&parent); |
|
598 | 599 | } |
|
599 | 600 | i -= 1; |
|
600 | 601 | } |
|
601 | 602 | |
|
602 | 603 | Ok((heads_removed, heads_added)) |
|
603 | 604 | } |
|
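Note: a brute-force way to see what `head_revs_diff` computes (hypothetical toy code, not the incremental scan above): on the linear graph `0 <- 1 <- 2 <- 3`, advancing from rev 1 to rev 3 removes head 1 and adds head 3.

    use std::collections::HashSet;

    // heads(:max): every rev up to `max` that is not the parent of another
    fn heads_up_to(parents: &[Vec<i32>], max: i32) -> HashSet<i32> {
        let mut heads: HashSet<i32> = (0..=max).collect();
        for rev in 0..=max as usize {
            for &p in &parents[rev] {
                heads.remove(&p);
            }
        }
        heads
    }

    fn main() {
        let parents = vec![vec![], vec![0], vec![1], vec![2]];
        let before = heads_up_to(&parents, 1); // {1}
        let after = heads_up_to(&parents, 3); // {3}
        let removed: HashSet<_> = before.difference(&after).collect();
        let added: HashSet<_> = after.difference(&before).collect();
        assert_eq!(removed, HashSet::from([&1]));
        assert_eq!(added, HashSet::from([&3]));
    }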
604 | 605 | |
|
605 | 606 | /// Return the head revisions of this index |
|
606 | 607 | pub fn head_revs_filtered( |
|
607 | 608 | &self, |
|
608 | 609 | filtered_revs: &HashSet<Revision>, |
|
609 | 610 | py_shortcut: bool, |
|
610 | 611 | ) -> Result<Option<Vec<Revision>>, GraphError> { |
|
611 | 612 | { |
|
612 | 613 | let guard = self |
|
613 | 614 | .head_revs |
|
614 | 615 | .read() |
|
615 | 616 | .expect("RwLock on Index.head_revs should not be poisoned"); |
|
616 | 617 | let self_head_revs = &guard.0; |
|
617 | 618 | let self_filtered_revs = &guard.1; |
|
618 | 619 | if !self_head_revs.is_empty() |
|
619 | 620 | && filtered_revs == self_filtered_revs |
|
620 | 621 | { |
|
621 | 622 | if py_shortcut { |
|
622 | 623 | // Don't copy the revs since we've already cached them |
|
623 | 624 | // on the Python side. |
|
624 | 625 | return Ok(None); |
|
625 | 626 | } else { |
|
626 | 627 | return Ok(Some(self_head_revs.to_owned())); |
|
627 | 628 | } |
|
628 | 629 | } |
|
629 | 630 | } |
|
630 | 631 | |
|
631 | 632 | let as_vec = if self.is_empty() { |
|
632 | 633 | vec![NULL_REVISION] |
|
633 | 634 | } else { |
|
634 | 635 | let mut not_heads = bitvec![0; self.len()]; |
|
635 | 636 | dagops::retain_heads_fast( |
|
636 | 637 | self, |
|
637 | 638 | not_heads.as_mut_bitslice(), |
|
638 | 639 | filtered_revs, |
|
639 | 640 | )?; |
|
640 | 641 | not_heads |
|
641 | 642 | .into_iter() |
|
642 | 643 | .enumerate() |
|
643 | 644 | .filter_map(|(idx, is_not_head)| { |
|
644 | 645 | if is_not_head { |
|
645 | 646 | None |
|
646 | 647 | } else { |
|
647 | 648 | Some(Revision(idx as BaseRevision)) |
|
648 | 649 | } |
|
649 | 650 | }) |
|
650 | 651 | .collect() |
|
651 | 652 | }; |
|
652 | 653 | *self |
|
653 | 654 | .head_revs |
|
654 | 655 | .write() |
|
655 | 656 | .expect("RwLock on Index.head_revs should not be poisoned") = |
|
656 | 657 | (as_vec.to_owned(), filtered_revs.to_owned()); |
|
657 | 658 | Ok(Some(as_vec)) |
|
658 | 659 | } |
|
659 | 660 | |
|
660 | 661 | /// Obtain the delta chain for a revision. |
|
661 | 662 | /// |
|
662 | 663 | /// `stop_rev` specifies a revision to stop at. If not specified, we |
|
663 | 664 | /// stop at the base of the chain. |
|
664 | 665 | /// |
|
665 | 666 | /// Returns a 2-tuple of (chain, stopped) where `chain` is a vec of |
|
666 | 667 | /// revs in ascending order and `stopped` is a bool indicating whether |
|
667 | 668 | /// `stoprev` was hit. |
|
668 | 669 | pub fn delta_chain( |
|
669 | 670 | &self, |
|
670 | 671 | rev: Revision, |
|
671 | 672 | stop_rev: Option<Revision>, |
|
672 | 673 | using_general_delta: Option<bool>, |
|
673 | 674 | ) -> Result<(Vec<Revision>, bool), HgError> { |
|
674 | 675 | let mut current_rev = rev; |
|
675 | 676 | let mut entry = self.get_entry(rev).unwrap(); |
|
676 | 677 | let mut chain = vec![]; |
|
677 | 678 | let using_general_delta = |
|
678 | 679 | using_general_delta.unwrap_or_else(|| self.uses_generaldelta()); |
|
679 | 680 | while current_rev.0 != entry.base_revision_or_base_of_delta_chain().0 |
|
680 | 681 | && stop_rev.map(|r| r != current_rev).unwrap_or(true) |
|
681 | 682 | { |
|
682 | 683 | chain.push(current_rev); |
|
683 | 684 | let new_rev = if using_general_delta { |
|
684 | 685 | entry.base_revision_or_base_of_delta_chain() |
|
685 | 686 | } else { |
|
686 | 687 | UncheckedRevision(current_rev.0 - 1) |
|
687 | 688 | }; |
|
688 | 689 | current_rev = self.check_revision(new_rev).ok_or_else(|| { |
|
689 | 690 | HgError::corrupted(format!("Revision {new_rev} out of range")) |
|
690 | 691 | })?; |
|
691 | 692 | if current_rev.0 == NULL_REVISION.0 { |
|
692 | 693 | break; |
|
693 | 694 | } |
|
694 | 695 | entry = self.get_entry(current_rev).unwrap() |
|
695 | 696 | } |
|
696 | 697 | |
|
697 | 698 | let stopped = if stop_rev.map(|r| current_rev == r).unwrap_or(false) { |
|
698 | 699 | true |
|
699 | 700 | } else { |
|
700 | 701 | chain.push(current_rev); |
|
701 | 702 | false |
|
702 | 703 | }; |
|
703 | 704 | chain.reverse(); |
|
704 | 705 | Ok((chain, stopped)) |
|
705 | 706 | } |
|
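Note: a toy model of the generaldelta walk (hypothetical `base` table, not this module's API): `base[r]` is the revision that `r`'s delta applies against, and `base[r] == r` marks a full snapshot, i.e. the base of the chain.

    fn toy_delta_chain(mut rev: usize, base: &[usize]) -> Vec<usize> {
        let mut chain = vec![];
        while base[rev] != rev {
            chain.push(rev);
            rev = base[rev]; // follow the delta parent
        }
        chain.push(rev); // the snapshot at the base of the chain
        chain.reverse(); // ascending order, as documented above
        chain
    }

    fn main() {
        // rev 0 and rev 3 are snapshots; 1 deltas against 0, 2 against 1.
        let base = [0, 0, 1, 3];
        assert_eq!(toy_delta_chain(2, &base), vec![0, 1, 2]);
        assert_eq!(toy_delta_chain(3, &base), vec![3]);
    }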
706 | 707 | |
|
707 | 708 | pub fn find_snapshots( |
|
708 | 709 | &self, |
|
709 | 710 | start_rev: UncheckedRevision, |
|
710 | 711 | end_rev: UncheckedRevision, |
|
711 | 712 | cache: &mut impl SnapshotsCache, |
|
712 | 713 | ) -> Result<(), RevlogError> { |
|
713 | 714 | let mut start_rev = start_rev.0; |
|
714 | 715 | let mut end_rev = end_rev.0; |
|
715 | 716 | end_rev += 1; |
|
716 | 717 | let len = self.len().try_into().unwrap(); |
|
717 | 718 | if end_rev > len { |
|
718 | 719 | end_rev = len; |
|
719 | 720 | } |
|
720 | 721 | if start_rev < 0 { |
|
721 | 722 | start_rev = 0; |
|
722 | 723 | } |
|
723 | 724 | for rev in start_rev..end_rev { |
|
724 | 725 | if !self.is_snapshot_unchecked(Revision(rev))? { |
|
725 | 726 | continue; |
|
726 | 727 | } |
|
727 | 728 | let mut base = self |
|
728 | 729 | .get_entry(Revision(rev)) |
|
729 | 730 | .unwrap() |
|
730 | 731 | .base_revision_or_base_of_delta_chain(); |
|
731 | 732 | if base.0 == rev { |
|
732 | 733 | base = NULL_REVISION.into(); |
|
733 | 734 | } |
|
734 | 735 | cache.insert_for(base.0, rev)?; |
|
735 | 736 | } |
|
736 | 737 | Ok(()) |
|
737 | 738 | } |
|
738 | 739 | |
|
739 | 740 | fn clear_head_revs(&self) { |
|
740 | 741 | self.head_revs |
|
741 | 742 | .write() |
|
742 | 743 | .expect("RwLock on Index.head_revs should not be poisoned") |
|
743 | 744 | .0 |
|
744 | 745 | .clear() |
|
745 | 746 | } |
|
746 | 747 | |
|
747 | 748 | /// TODO move this to the trait probably, along with other things |
|
748 | 749 | pub fn append( |
|
749 | 750 | &mut self, |
|
750 | 751 | revision_data: RevisionDataParams, |
|
751 | 752 | ) -> Result<(), RevlogError> { |
|
752 | 753 | revision_data.validate()?; |
|
753 | 754 | let entry_v1 = revision_data.into_v1(); |
|
754 | 755 | let entry_bytes = entry_v1.as_bytes(); |
|
755 | 756 | if self.bytes.len() == 0 { |
|
756 | 757 | self.bytes.first_entry[INDEX_HEADER_SIZE..].copy_from_slice( |
|
757 | 758 | &entry_bytes[INDEX_HEADER_SIZE..INDEX_ENTRY_SIZE], |
|
758 | 759 | ) |
|
759 | 760 | } |
|
760 | 761 | if self.is_inline() { |
|
761 | 762 | let new_offset = self.bytes.len(); |
|
762 | 763 | if let Some(offsets) = &mut *self.get_offsets_mut() { |
|
763 | 764 | offsets.push(new_offset) |
|
764 | 765 | } |
|
765 | 766 | } |
|
766 | 767 | self.bytes.added.extend(entry_bytes); |
|
767 | 768 | self.clear_head_revs(); |
|
768 | 769 | Ok(()) |
|
769 | 770 | } |
|
770 | 771 | |
|
771 | 772 | pub fn pack_header(&self, header: i32) -> [u8; 4] { |
|
772 | 773 | header.to_be_bytes() |
|
773 | 774 | } |
|
774 | 775 | |
|
775 | 776 | pub fn remove(&mut self, rev: Revision) -> Result<(), RevlogError> { |
|
776 | 777 | let offsets = if self.is_inline() { |
|
777 | 778 | self.get_offsets().clone() |
|
778 | 779 | } else { |
|
779 | 780 | None |
|
780 | 781 | }; |
|
781 | 782 | self.bytes.remove(rev, offsets.as_deref())?; |
|
782 | 783 | if self.is_inline() { |
|
783 | 784 | if let Some(offsets) = &mut *self.get_offsets_mut() { |
|
784 | 785 | offsets.truncate(rev.0 as usize) |
|
785 | 786 | } |
|
786 | 787 | } |
|
787 | 788 | self.clear_head_revs(); |
|
788 | 789 | Ok(()) |
|
789 | 790 | } |
|
790 | 791 | |
|
791 | 792 | pub fn clear_caches(&self) { |
|
792 | 793 | // We need to get the 'inline' value from Python at init and use this |
|
793 | 794 | // instead of offsets to determine whether we're inline since we might |
|
794 | 795 | // clear caches. This implies re-populating the offsets on-demand. |
|
795 | 796 | *self |
|
796 | 797 | .offsets |
|
797 | 798 | .write() |
|
798 | 799 | .expect("RwLock on Index.offsets should not be poisoned") = None; |
|
799 | 800 | self.clear_head_revs(); |
|
800 | 801 | } |
|
801 | 802 | |
|
802 | 803 | /// Unchecked version of `is_snapshot`. |
|
803 | 804 | /// Assumes the caller checked that `rev` is within a valid revision range. |
|
804 | 805 | pub fn is_snapshot_unchecked( |
|
805 | 806 | &self, |
|
806 | 807 | mut rev: Revision, |
|
807 | 808 | ) -> Result<bool, RevlogError> { |
|
808 | 809 | while rev.0 >= 0 { |
|
809 | 810 | let entry = self.get_entry(rev).unwrap(); |
|
810 | 811 | let mut base = entry.base_revision_or_base_of_delta_chain().0; |
|
811 | 812 | if base == rev.0 { |
|
812 | 813 | base = NULL_REVISION.0; |
|
813 | 814 | } |
|
814 | 815 | if base == NULL_REVISION.0 { |
|
815 | 816 | return Ok(true); |
|
816 | 817 | } |
|
817 | 818 | let [mut p1, mut p2] = self |
|
818 | 819 | .parents(rev) |
|
819 | 820 | .map_err(|_| RevlogError::InvalidRevision)?; |
|
820 | 821 | while let Some(p1_entry) = self.get_entry(p1) { |
|
821 | 822 | if p1_entry.compressed_len() != 0 || p1.0 == 0 { |
|
822 | 823 | break; |
|
823 | 824 | } |
|
824 | 825 | let parent_base = |
|
825 | 826 | p1_entry.base_revision_or_base_of_delta_chain(); |
|
826 | 827 | if parent_base.0 == p1.0 { |
|
827 | 828 | break; |
|
828 | 829 | } |
|
829 | 830 | p1 = self |
|
830 | 831 | .check_revision(parent_base) |
|
831 | 832 | .ok_or(RevlogError::InvalidRevision)?; |
|
832 | 833 | } |
|
833 | 834 | while let Some(p2_entry) = self.get_entry(p2) { |
|
834 | 835 | if p2_entry.compressed_len() != 0 || p2.0 == 0 { |
|
835 | 836 | break; |
|
836 | 837 | } |
|
837 | 838 | let parent_base = |
|
838 | 839 | p2_entry.base_revision_or_base_of_delta_chain(); |
|
839 | 840 | if parent_base.0 == p2.0 { |
|
840 | 841 | break; |
|
841 | 842 | } |
|
842 | 843 | p2 = self |
|
843 | 844 | .check_revision(parent_base) |
|
844 | 845 | .ok_or(RevlogError::InvalidRevision)?; |
|
845 | 846 | } |
|
846 | 847 | if base == p1.0 || base == p2.0 { |
|
847 | 848 | return Ok(false); |
|
848 | 849 | } |
|
849 | 850 | rev = self |
|
850 | 851 | .check_revision(base.into()) |
|
851 | 852 | .ok_or(RevlogError::InvalidRevision)?; |
|
852 | 853 | } |
|
853 | 854 | Ok(rev == NULL_REVISION) |
|
854 | 855 | } |
|
855 | 856 | |
|
856 | 857 | /// Return whether the given revision is a snapshot. Returns an error if |
|
857 | 858 | /// `rev` is not within a valid revision range. |
|
858 | 859 | pub fn is_snapshot( |
|
859 | 860 | &self, |
|
860 | 861 | rev: UncheckedRevision, |
|
861 | 862 | ) -> Result<bool, RevlogError> { |
|
862 | 863 | let rev = self |
|
863 | 864 | .check_revision(rev) |
|
864 | 865 | .ok_or_else(|| RevlogError::corrupted("test"))?; |
|
865 | 866 | self.is_snapshot_unchecked(rev) |
|
866 | 867 | } |
|
867 | 868 | |
|
868 | 869 | /// Slice revs to reduce the amount of unrelated data to be read from disk. |
|
869 | 870 | /// |
|
870 | 871 | /// The index is sliced into groups that should be read in one time. |
|
871 | 872 | /// |
|
872 | 873 | /// The initial chunk is sliced until the overall density |
|
873 | 874 | /// (payload/chunks-span ratio) is above `target_density`. |
|
874 | 875 | /// No gap smaller than `min_gap_size` is skipped. |
|
875 | 876 | pub fn slice_chunk_to_density( |
|
876 | 877 | &self, |
|
877 | 878 | revs: &[Revision], |
|
878 | 879 | target_density: f64, |
|
879 | 880 | min_gap_size: usize, |
|
880 | 881 | ) -> Vec<Vec<Revision>> { |
|
881 | 882 | if revs.is_empty() { |
|
882 | 883 | return vec![]; |
|
883 | 884 | } |
|
884 | 885 | if revs.len() == 1 { |
|
885 | 886 | return vec![revs.to_owned()]; |
|
886 | 887 | } |
|
887 | 888 | let delta_chain_span = self.segment_span(revs); |
|
888 | 889 | if delta_chain_span < min_gap_size { |
|
889 | 890 | return vec![revs.to_owned()]; |
|
890 | 891 | } |
|
891 | 892 | let entries: Vec<_> = revs |
|
892 | 893 | .iter() |
|
893 | 894 | .map(|r| { |
|
894 | 895 | (*r, self.get_entry(*r).unwrap_or_else(|| self.null_entry())) |
|
895 | 896 | }) |
|
896 | 897 | .collect(); |
|
897 | 898 | |
|
898 | 899 | let mut read_data = delta_chain_span; |
|
899 | 900 | let chain_payload: u32 = |
|
900 | 901 | entries.iter().map(|(_r, e)| e.compressed_len()).sum(); |
|
901 | 902 | let mut density = if delta_chain_span > 0 { |
|
902 | 903 | chain_payload as f64 / delta_chain_span as f64 |
|
903 | 904 | } else { |
|
904 | 905 | 1.0 |
|
905 | 906 | }; |
|
906 | 907 | |
|
907 | 908 | if density >= target_density { |
|
908 | 909 | return vec![revs.to_owned()]; |
|
909 | 910 | } |
|
910 | 911 | |
|
911 | 912 | // Store the gaps in a vector, sorted below so the largest can be popped first |
|
912 | 913 | let mut gaps = Vec::new(); |
|
913 | 914 | let mut previous_end = None; |
|
914 | 915 | |
|
915 | 916 | for (i, (_rev, entry)) in entries.iter().enumerate() { |
|
916 | 917 | let start = entry.c_start() as usize; |
|
917 | 918 | let length = entry.compressed_len(); |
|
918 | 919 | |
|
919 | 920 | // Skip empty revisions to form larger holes |
|
920 | 921 | if length == 0 { |
|
921 | 922 | continue; |
|
922 | 923 | } |
|
923 | 924 | |
|
924 | 925 | if let Some(end) = previous_end { |
|
925 | 926 | let gap_size = start - end; |
|
926 | 927 | // Only consider holes that are large enough |
|
927 | 928 | if gap_size > min_gap_size { |
|
928 | 929 | gaps.push((gap_size, i)); |
|
929 | 930 | } |
|
930 | 931 | } |
|
931 | 932 | previous_end = Some(start + length as usize); |
|
932 | 933 | } |
|
933 | 934 | if gaps.is_empty() { |
|
934 | 935 | return vec![revs.to_owned()]; |
|
935 | 936 | } |
|
936 | 937 | // sort the gaps to pop them from largest to smallest |
|
937 | 938 | gaps.sort_unstable(); |
|
938 | 939 | |
|
939 | 940 | // Collect the indices of the largest holes until |
|
940 | 941 | // the density is acceptable |
|
941 | 942 | let mut selected = vec![]; |
|
942 | 943 | while let Some((gap_size, gap_id)) = gaps.pop() { |
|
943 | 944 | if density >= target_density { |
|
944 | 945 | break; |
|
945 | 946 | } |
|
946 | 947 | selected.push(gap_id); |
|
947 | 948 | |
|
948 | 949 | // `gaps` is sorted in increasing order, so `pop()` above yielded |
|
949 | 950 | // the largest remaining gap, which we now skip |
|
950 | 951 | read_data -= gap_size; |
|
951 | 952 | density = if read_data > 0 { |
|
952 | 953 | chain_payload as f64 / read_data as f64 |
|
953 | 954 | } else { |
|
954 | 955 | 1.0 |
|
955 | 956 | }; |
|
956 | 957 | if density >= target_density { |
|
957 | 958 | break; |
|
958 | 959 | } |
|
959 | 960 | } |
|
960 | 961 | selected.sort_unstable(); |
|
961 | 962 | selected.push(revs.len()); |
|
962 | 963 | |
|
963 | 964 | // Cut the revs at collected indices |
|
964 | 965 | let mut previous_idx = 0; |
|
965 | 966 | let mut chunks = vec![]; |
|
966 | 967 | for idx in selected { |
|
967 | 968 | let chunk = self.trim_chunk(&entries, previous_idx, idx); |
|
968 | 969 | if !chunk.is_empty() { |
|
969 | 970 | chunks.push(chunk.iter().map(|(rev, _entry)| *rev).collect()); |
|
970 | 971 | } |
|
971 | 972 | previous_idx = idx; |
|
972 | 973 | } |
|
973 | 974 | let chunk = self.trim_chunk(&entries, previous_idx, entries.len()); |
|
974 | 975 | if !chunk.is_empty() { |
|
975 | 976 | chunks.push(chunk.iter().map(|(rev, _entry)| *rev).collect()); |
|
976 | 977 | } |
|
977 | 978 | |
|
978 | 979 | chunks |
|
979 | 980 | } |
|
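Note: a worked example of the density criterion with made-up numbers: three 10-byte chunks at offsets 0, 10 and 1000 have a 30-byte payload spread over a 1010-byte span, so density is about 0.03. With a target density of 0.5, the 980-byte gap is skipped and the revs are split into two read groups.

    fn main() {
        let chunks = [(0u64, 10u64), (10, 10), (1000, 10)]; // (start, len)
        let payload: u64 = chunks.iter().map(|&(_, len)| len).sum();
        let (last_start, last_len) = chunks[chunks.len() - 1];
        let span = last_start + last_len - chunks[0].0;
        let density = payload as f64 / span as f64;
        assert!((density - 30.0 / 1010.0).abs() < 1e-12);
        assert!(density < 0.5); // too sparse: split around the large gap
    }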
980 | 981 | |
|
981 | 982 | /// Get the byte span of a segment of sorted revisions. |
|
982 | 983 | /// |
|
983 | 984 | /// Occurrences of [`NULL_REVISION`] are ignored at the beginning of |
|
984 | 985 | /// the `revs` segment. |
|
985 | 986 | /// |
|
986 | 987 | /// panics: |
|
987 | 988 | /// - if `revs` is empty or only made of `NULL_REVISION` |
|
988 | 989 | /// - if the entry for the last or the first non-null element of |
|
989 | 990 | /// `revs` cannot be retrieved. |
|
990 | 991 | fn segment_span(&self, revs: &[Revision]) -> usize { |
|
991 | 992 | if revs.is_empty() { |
|
992 | 993 | return 0; |
|
993 | 994 | } |
|
994 | 995 | let last_entry = &self.get_entry(revs[revs.len() - 1]).unwrap(); |
|
995 | 996 | let end = last_entry.c_start() + last_entry.compressed_len() as u64; |
|
996 | 997 | let first_rev = revs.iter().find(|r| r.0 != NULL_REVISION.0).unwrap(); |
|
997 | 998 | let start = if first_rev.0 == 0 { |
|
998 | 999 | 0 |
|
999 | 1000 | } else { |
|
1000 | 1001 | self.get_entry(*first_rev).unwrap().c_start() |
|
1001 | 1002 | }; |
|
1002 | 1003 | (end - start) as usize |
|
1003 | 1004 | } |
|
1004 | 1005 | |
|
1005 | 1006 | /// Returns `&revs[start..end]` without empty trailing revs |
|
1006 | 1007 | fn trim_chunk<'a>( |
|
1007 | 1008 | &'a self, |
|
1008 | 1009 | revs: &'a [(Revision, IndexEntry)], |
|
1009 | 1010 | start: usize, |
|
1010 | 1011 | mut end: usize, |
|
1011 | 1012 | ) -> &'a [(Revision, IndexEntry)] { |
|
1012 | 1013 | // Trim empty revs at the end, except the very first rev of a chain |
|
1013 | 1014 | let last_rev = revs[end - 1].0; |
|
1014 | 1015 | if last_rev.0 < self.len() as BaseRevision { |
|
1015 | 1016 | while end > 1 |
|
1016 | 1017 | && end > start |
|
1017 | 1018 | && revs[end - 1].1.compressed_len() == 0 |
|
1018 | 1019 | { |
|
1019 | 1020 | end -= 1 |
|
1020 | 1021 | } |
|
1021 | 1022 | } |
|
1022 | 1023 | &revs[start..end] |
|
1023 | 1024 | } |
|
1024 | 1025 | |
|
1025 | 1026 | /// Computes the set of revisions for each non-public phase from `roots`, |
|
1026 | 1027 | /// which are the last known roots for each non-public phase. |
|
1027 | 1028 | pub fn compute_phases_map_sets( |
|
1028 | 1029 | &self, |
|
1029 | 1030 | roots: HashMap<Phase, Vec<Revision>>, |
|
1030 | 1031 | ) -> Result<(usize, RootsPerPhase), GraphError> { |
|
1031 | 1032 | let mut phases = vec![Phase::Public; self.len()]; |
|
1032 | 1033 | let mut min_phase_rev = NULL_REVISION; |
|
1033 | 1034 | |
|
1034 | 1035 | for phase in Phase::non_public_phases() { |
|
1035 | 1036 | if let Some(phase_roots) = roots.get(phase) { |
|
1036 | 1037 | let min_rev = |
|
1037 | 1038 | self.add_roots_get_min(phase_roots, &mut phases, *phase); |
|
1038 | 1039 | if min_rev != NULL_REVISION |
|
1039 | 1040 | && (min_phase_rev == NULL_REVISION |
|
1040 | 1041 | || min_rev < min_phase_rev) |
|
1041 | 1042 | { |
|
1042 | 1043 | min_phase_rev = min_rev; |
|
1043 | 1044 | } |
|
1044 | 1045 | } else { |
|
1045 | 1046 | continue; |
|
1046 | 1047 | }; |
|
1047 | 1048 | } |
|
1048 | 1049 | let mut phase_sets: RootsPerPhase = Default::default(); |
|
1049 | 1050 | |
|
1050 | 1051 | if min_phase_rev == NULL_REVISION { |
|
1051 | 1052 | min_phase_rev = Revision(self.len() as BaseRevision); |
|
1052 | 1053 | } |
|
1053 | 1054 | |
|
1054 | 1055 | for rev in min_phase_rev.0..self.len() as BaseRevision { |
|
1055 | 1056 | let rev = Revision(rev); |
|
1056 | 1057 | let [p1, p2] = self.parents(rev)?; |
|
1057 | 1058 | |
|
1058 | 1059 | if p1.0 >= 0 && phases[p1.0 as usize] > phases[rev.0 as usize] { |
|
1059 | 1060 | phases[rev.0 as usize] = phases[p1.0 as usize]; |
|
1060 | 1061 | } |
|
1061 | 1062 | if p2.0 >= 0 && phases[p2.0 as usize] > phases[rev.0 as usize] { |
|
1062 | 1063 | phases[rev.0 as usize] = phases[p2.0 as usize]; |
|
1063 | 1064 | } |
|
1064 | 1065 | let set = match phases[rev.0 as usize] { |
|
1065 | 1066 | Phase::Public => continue, |
|
1066 | 1067 | phase => &mut phase_sets[phase as usize - 1], |
|
1067 | 1068 | }; |
|
1068 | 1069 | set.push(rev); |
|
1069 | 1070 | } |
|
1070 | 1071 | |
|
1071 | 1072 | Ok((self.len(), phase_sets)) |
|
1072 | 1073 | } |
|
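Note: the propagation loop above relies on phases being totally ordered (public < draft < secret), so a child ends up with the maximum of its parents' phases. A toy version on a linear graph, with a hypothetical numeric encoding (0 = public, 1 = draft):

    fn main() {
        // Linear graph 0 <- 1 <- 2, with -1 standing in for the null parent;
        // rev 1 is a draft root.
        let parents: [[i32; 2]; 3] = [[-1, -1], [0, -1], [1, -1]];
        let mut phases = [0u8, 1, 0];
        for rev in 0..parents.len() {
            for &p in &parents[rev] {
                if p >= 0 {
                    phases[rev] = phases[rev].max(phases[p as usize]);
                }
            }
        }
        assert_eq!(phases, [0, 1, 1]); // rev 2 inherited the draft phase
    }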
1073 | 1074 | |
|
1074 | 1075 | fn add_roots_get_min( |
|
1075 | 1076 | &self, |
|
1076 | 1077 | phase_roots: &[Revision], |
|
1077 | 1078 | phases: &mut [Phase], |
|
1078 | 1079 | phase: Phase, |
|
1079 | 1080 | ) -> Revision { |
|
1080 | 1081 | let mut min_rev = NULL_REVISION; |
|
1081 | 1082 | |
|
1082 | 1083 | for root in phase_roots { |
|
1083 | 1084 | phases[root.0 as usize] = phase; |
|
1084 | 1085 | if min_rev == NULL_REVISION || min_rev > *root { |
|
1085 | 1086 | min_rev = *root; |
|
1086 | 1087 | } |
|
1087 | 1088 | } |
|
1088 | 1089 | min_rev |
|
1089 | 1090 | } |
|
1090 | 1091 | |
|
1091 | 1092 | /// Return `(heads(::(<roots> and <roots>::<heads>)))` |
|
1092 | 1093 | /// If `include_path` is `true`, return `(<roots>::<heads>)`. |
|
1093 | 1094 | /// |
|
1094 | 1095 | /// `min_root` and `roots` are unchecked since they are just used as |
|
1095 | 1096 | /// a bound or for comparison and don't need to represent a valid revision. |
|
1096 | 1097 | /// In practice, the only invalid revision passed is the working directory |
|
1097 | 1098 | /// revision ([`i32::MAX`]). |
|
1098 | 1099 | pub fn reachable_roots( |
|
1099 | 1100 | &self, |
|
1100 | 1101 | min_root: UncheckedRevision, |
|
1101 | 1102 | mut heads: Vec<Revision>, |
|
1102 | 1103 | roots: HashSet<UncheckedRevision>, |
|
1103 | 1104 | include_path: bool, |
|
1104 | 1105 | ) -> Result<HashSet<Revision>, GraphError> { |
|
1105 | 1106 | if roots.is_empty() { |
|
1106 | 1107 | return Ok(HashSet::new()); |
|
1107 | 1108 | } |
|
1108 | 1109 | let mut reachable = HashSet::new(); |
|
1109 | 1110 | let mut seen = HashMap::new(); |
|
1110 | 1111 | |
|
1111 | 1112 | while let Some(rev) = heads.pop() { |
|
1112 | 1113 | if roots.contains(&rev.into()) { |
|
1113 | 1114 | reachable.insert(rev); |
|
1114 | 1115 | if !include_path { |
|
1115 | 1116 | continue; |
|
1116 | 1117 | } |
|
1117 | 1118 | } |
|
1118 | 1119 | let parents = self.parents(rev)?; |
|
1119 | 1120 | seen.insert(rev, parents); |
|
1120 | 1121 | for parent in parents { |
|
1121 | 1122 | if parent.0 >= min_root.0 && !seen.contains_key(&parent) { |
|
1122 | 1123 | heads.push(parent); |
|
1123 | 1124 | } |
|
1124 | 1125 | } |
|
1125 | 1126 | } |
|
1126 | 1127 | if !include_path { |
|
1127 | 1128 | return Ok(reachable); |
|
1128 | 1129 | } |
|
1129 | 1130 | let mut revs: Vec<_> = seen.keys().collect(); |
|
1130 | 1131 | revs.sort_unstable(); |
|
1131 | 1132 | for rev in revs { |
|
1132 | 1133 | for parent in seen[rev] { |
|
1133 | 1134 | if reachable.contains(&parent) { |
|
1134 | 1135 | reachable.insert(*rev); |
|
1135 | 1136 | } |
|
1136 | 1137 | } |
|
1137 | 1138 | } |
|
1138 | 1139 | Ok(reachable) |
|
1139 | 1140 | } |
|
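Note: a brute-force oracle for the `include_path == false` case (hypothetical toy code, not the implementation above): walk ancestors from the heads and keep the revisions that are roots.

    use std::collections::HashSet;

    fn main() {
        // Graph: 0 <- 1 <- 2 and 0 <- 3; heads = [2], roots = {1, 3}.
        let parents: [[i32; 2]; 4] = [[-1, -1], [0, -1], [1, -1], [0, -1]];
        let roots = HashSet::from([1usize, 3]);
        let mut stack = vec![2usize];
        let mut seen = HashSet::new();
        let mut reachable = HashSet::new();
        while let Some(rev) = stack.pop() {
            if roots.contains(&rev) {
                reachable.insert(rev);
                continue; // don't walk past a root when the path is not needed
            }
            for &p in &parents[rev] {
                if p >= 0 && seen.insert(p as usize) {
                    stack.push(p as usize);
                }
            }
        }
        assert_eq!(reachable, HashSet::from([1])); // 3 is not an ancestor of 2
    }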
1140 | 1141 | |
|
1141 | 1142 | /// Given a (possibly overlapping) set of revs, return all the |
|
1142 | 1143 | /// common ancestor heads: `heads(::args[0] and ::args[1] and ...)` |
|
1143 | 1144 | pub fn common_ancestor_heads( |
|
1144 | 1145 | &self, |
|
1145 | 1146 | revisions: &[Revision], |
|
1146 | 1147 | ) -> Result<Vec<Revision>, GraphError> { |
|
1147 | 1148 | // given that revisions is expected to be small, we find this shortcut |
|
1148 | 1149 | // potentially acceptable, especially given that `hg-cpython` could |
|
1149 | 1150 | // very much bypass this, constructing a vector of unique values from |
|
1150 | 1151 | // the outset. |
|
1151 | 1152 | let as_set: HashSet<Revision> = revisions.iter().copied().collect(); |
|
1152 | 1153 | // Besides deduplicating, the C version also implements the shortcut |
|
1153 | 1154 | // for `NULL_REVISION`: |
|
1154 | 1155 | if as_set.contains(&NULL_REVISION) { |
|
1155 | 1156 | return Ok(vec![]); |
|
1156 | 1157 | } |
|
1157 | 1158 | |
|
1158 | 1159 | let revisions: Vec<Revision> = as_set.into_iter().collect(); |
|
1159 | 1160 | |
|
1160 | 1161 | if revisions.len() < 8 { |
|
1161 | 1162 | self.find_gca_candidates::<u8>(&revisions) |
|
1162 | 1163 | } else if revisions.len() < 64 { |
|
1163 | 1164 | self.find_gca_candidates::<u64>(&revisions) |
|
1164 | 1165 | } else { |
|
1165 | 1166 | self.find_gca_candidates::<NonStaticPoisonableBitSet>(&revisions) |
|
1166 | 1167 | } |
|
1167 | 1168 | } |
|
1168 | 1169 | |
|
1169 | 1170 | pub fn ancestors( |
|
1170 | 1171 | &self, |
|
1171 | 1172 | revisions: &[Revision], |
|
1172 | 1173 | ) -> Result<Vec<Revision>, GraphError> { |
|
1173 | 1174 | self.find_deepest_revs(&self.common_ancestor_heads(revisions)?) |
|
1174 | 1175 | } |
|
1175 | 1176 | |
|
1176 | 1177 | /// Given a disjoint set of revs, return all candidates for the |
|
1177 | 1178 | /// greatest common ancestor. In revset notation, this is the set |
|
1178 | 1179 | /// `heads(::a and ::b and ...)` |
|
1179 | 1180 | fn find_gca_candidates<BS: PoisonableBitSet + Clone>( |
|
1180 | 1181 | &self, |
|
1181 | 1182 | revs: &[Revision], |
|
1182 | 1183 | ) -> Result<Vec<Revision>, GraphError> { |
|
1183 | 1184 | if revs.is_empty() { |
|
1184 | 1185 | return Ok(vec![]); |
|
1185 | 1186 | } |
|
1186 | 1187 | let revcount = revs.len(); |
|
1187 | 1188 | let mut candidates = vec![]; |
|
1188 | 1189 | let max_rev = revs.iter().max().unwrap(); |
|
1189 | 1190 | |
|
1190 | 1191 | let mut seen = BS::vec_of_empty(revs.len(), (max_rev.0 + 1) as usize); |
|
1191 | 1192 | |
|
1192 | 1193 | for (idx, rev) in revs.iter().enumerate() { |
|
1193 | 1194 | seen[rev.0 as usize].add(idx); |
|
1194 | 1195 | } |
|
1195 | 1196 | let mut current_rev = *max_rev; |
|
1196 | 1197 | // Number of revisions whose inspection in the main loop |
|
1197 | 1198 | // will give a result or trigger inspection of other revisions |
|
1198 | 1199 | let mut interesting = revcount; |
|
1199 | 1200 | |
|
1200 | 1201 | // The algorithm works on a vector of bit sets, indexed by revision |
|
1201 | 1202 | // numbers and iterated in reverse order. |
|
1202 | 1203 | // An entry in this vector is poisoned if and only if the corresponding |
|
1203 | 1204 | // revision is a common, yet not maximal ancestor. |
|
1204 | 1205 | |
|
1205 | 1206 | // The principle of the algorithm is as follows: |
|
1206 | 1207 | // For a revision `r`, when entering the loop, `seen[r]` is either |
|
1207 | 1208 | // poisoned or the subset of `revs` of which `r` is an ancestor. |
|
1208 | 1209 | // If this subset is full, then `r` is a solution and its parents |
|
1209 | 1210 | // have to be poisoned. |
|
1210 | 1211 | // |
|
1211 | 1212 | // At each iteration, the bit sets of the parents are updated by |
|
1212 | 1213 | // union with `seen[r]`. |
|
1213 | 1214 | // As we walk the index from the end, we are sure we have encountered |
|
1214 | 1215 | // all children of `r` before `r`, hence we know that `seen[r]` is |
|
1215 | 1216 | // fully computed. |
|
1216 | 1217 | // |
|
1217 | 1218 | // On top of that there are several optimizations that make reading |
|
1218 | 1219 | // less obvious than the comment above: |
|
1219 | 1220 | // - The `interesting` counter allows us to break early |
|
1220 | 1221 | // - The loop starts from `max(revs)` |
|
1221 | 1222 | // - Early return in case it is detected that one of the incoming revs |
|
1222 | 1223 | // is a common ancestor of all of them. |
|
1223 | 1224 | while current_rev.0 >= 0 && interesting > 0 { |
|
1224 | 1225 | let current_seen = seen[current_rev.0 as usize].clone(); |
|
1225 | 1226 | |
|
1226 | 1227 | if current_seen.is_empty() { |
|
1227 | 1228 | current_rev = Revision(current_rev.0 - 1); |
|
1228 | 1229 | continue; |
|
1229 | 1230 | } |
|
1230 | 1231 | let mut poison = current_seen.is_poisoned(); |
|
1231 | 1232 | if !poison { |
|
1232 | 1233 | interesting -= 1; |
|
1233 | 1234 | if current_seen.is_full_range(revcount) { |
|
1234 | 1235 | candidates.push(current_rev); |
|
1235 | 1236 | poison = true; |
|
1236 | 1237 | |
|
1237 | 1238 | // Being a common ancestor, if `current_rev` is among |
|
1238 | 1239 | // the input revisions, it is *the* answer. |
|
1239 | 1240 | for rev in revs { |
|
1240 | 1241 | if *rev == current_rev { |
|
1241 | 1242 | return Ok(candidates); |
|
1242 | 1243 | } |
|
1243 | 1244 | } |
|
1244 | 1245 | } |
|
1245 | 1246 | } |
|
1246 | 1247 | for parent in self.parents(current_rev)? { |
|
1247 | 1248 | if parent == NULL_REVISION { |
|
1248 | 1249 | continue; |
|
1249 | 1250 | } |
|
1250 | 1251 | let parent_seen = &mut seen[parent.0 as usize]; |
|
1251 | 1252 | if poison { |
|
1252 | 1253 | // this block is logically equivalent to poisoning the parent |
|
1253 | 1254 | // and counting it as non-interesting if it |
|
1254 | 1255 | // had been seen before (and hence counted as interesting) |
|
1255 | 1256 | if !parent_seen.is_empty() && !parent_seen.is_poisoned() { |
|
1256 | 1257 | interesting -= 1; |
|
1257 | 1258 | } |
|
1258 | 1259 | parent_seen.poison(); |
|
1259 | 1260 | } else { |
|
1260 | 1261 | if parent_seen.is_empty() { |
|
1261 | 1262 | interesting += 1; |
|
1262 | 1263 | } |
|
1263 | 1264 | parent_seen.union(¤t_seen); |
|
1264 | 1265 | } |
|
1265 | 1266 | } |
|
1266 | 1267 | |
|
1267 | 1268 | current_rev = Revision(current_rev.0 - 1); |
|
1268 | 1269 | } |
|
1269 | 1270 | |
|
1270 | 1271 | Ok(candidates) |
|
1271 | 1272 | } |
|
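Note: a miniature of the bit-set walk above (poisoning and the early exits are omitted for clarity): walking revisions from highest to lowest, `seen[r]` accumulates which of the query revisions `r` is an ancestor of, and a revision whose set becomes full is a common-ancestor candidate.

    fn main() {
        // Diamond graph: 0 <- 1, 0 <- 2, {1, 2} <- 3; the query revs are 1 and 2.
        let parents: [[i32; 2]; 4] = [[-1, -1], [0, -1], [0, -1], [1, 2]];
        let mut seen = [0u8; 4];
        seen[1] = 0b01; // rev 1 is query rev #0
        seen[2] = 0b10; // rev 2 is query rev #1
        for rev in (0..4usize).rev() {
            for &p in &parents[rev] {
                if p >= 0 {
                    seen[p as usize] |= seen[rev]; // union into the parent
                }
            }
        }
        assert_eq!(seen[0], 0b11); // rev 0 is an ancestor of both query revs
    }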
1272 | 1273 | |
|
1273 | 1274 | /// Given a disjoint set of revs, return the subset with the longest path |
|
1274 | 1275 | /// to the root. |
|
1275 | 1276 | fn find_deepest_revs( |
|
1276 | 1277 | &self, |
|
1277 | 1278 | revs: &[Revision], |
|
1278 | 1279 | ) -> Result<Vec<Revision>, GraphError> { |
|
1279 | 1280 | // TODO replace this all with just comparing rank? |
|
1280 | 1281 | // Also, the original implementations in C/Python are cryptic, not |
|
1281 | 1282 | // even sure we actually need this? |
|
1282 | 1283 | if revs.len() <= 1 { |
|
1283 | 1284 | return Ok(revs.to_owned()); |
|
1284 | 1285 | } |
|
1285 | 1286 | let max_rev = revs.iter().max().unwrap().0; |
|
1286 | 1287 | let mut interesting = HashMap::new(); |
|
1287 | 1288 | let mut seen = vec![0; max_rev as usize + 1]; |
|
1288 | 1289 | let mut depth = vec![0; max_rev as usize + 1]; |
|
1289 | 1290 | let mut mapping = vec![]; |
|
1290 | 1291 | let mut revs = revs.to_owned(); |
|
1291 | 1292 | revs.sort_unstable(); |
|
1292 | 1293 | |
|
1293 | 1294 | for (idx, rev) in revs.iter().enumerate() { |
|
1294 | 1295 | depth[rev.0 as usize] = 1; |
|
1295 | 1296 | let shift = 1 << idx; |
|
1296 | 1297 | seen[rev.0 as usize] = shift; |
|
1297 | 1298 | interesting.insert(shift, 1); |
|
1298 | 1299 | mapping.push((shift, *rev)); |
|
1299 | 1300 | } |
|
1300 | 1301 | |
|
1301 | 1302 | let mut current_rev = Revision(max_rev); |
|
1302 | 1303 | while current_rev.0 >= 0 && interesting.len() > 1 { |
|
1303 | 1304 | let current_depth = depth[current_rev.0 as usize]; |
|
1304 | 1305 | if current_depth == 0 { |
|
1305 | 1306 | current_rev = Revision(current_rev.0 - 1); |
|
1306 | 1307 | continue; |
|
1307 | 1308 | } |
|
1308 | 1309 | |
|
1309 | 1310 | let current_seen = seen[current_rev.0 as usize]; |
|
1310 | 1311 | for parent in self.parents(current_rev)? { |
|
1311 | 1312 | if parent == NULL_REVISION { |
|
1312 | 1313 | continue; |
|
1313 | 1314 | } |
|
1314 | 1315 | let parent_seen = seen[parent.0 as usize]; |
|
1315 | 1316 | let parent_depth = depth[parent.0 as usize]; |
|
1316 | 1317 | if parent_depth <= current_depth { |
|
1317 | 1318 | depth[parent.0 as usize] = current_depth + 1; |
|
1318 | 1319 | if parent_seen != current_seen { |
|
1319 | 1320 | *interesting.get_mut(¤t_seen).unwrap() += 1; |
|
1320 | 1321 | seen[parent.0 as usize] = current_seen; |
|
1321 | 1322 | if parent_seen != 0 { |
|
1322 | 1323 | let parent_interesting = |
|
1323 | 1324 | interesting.get_mut(&parent_seen).unwrap(); |
|
1324 | 1325 | *parent_interesting -= 1; |
|
1325 | 1326 | if *parent_interesting == 0 { |
|
1326 | 1327 | interesting.remove(&parent_seen); |
|
1327 | 1328 | } |
|
1328 | 1329 | } |
|
1329 | 1330 | } |
|
1330 | 1331 | } else if current_depth == parent_depth - 1 { |
|
1331 | 1332 | let either_seen = parent_seen | current_seen; |
|
1332 | 1333 | if either_seen == parent_seen { |
|
1333 | 1334 | continue; |
|
1334 | 1335 | } |
|
1335 | 1336 | seen[parent.0 as usize] = either_seen; |
|
1336 | 1337 | interesting |
|
1337 | 1338 | .entry(either_seen) |
|
1338 | 1339 | .and_modify(|v| *v += 1) |
|
1339 | 1340 | .or_insert(1); |
|
1340 | 1341 | *interesting.get_mut(&parent_seen).unwrap() -= 1; |
|
1341 | 1342 | if interesting[&parent_seen] == 0 { |
|
1342 | 1343 | interesting.remove(&parent_seen); |
|
1343 | 1344 | } |
|
1344 | 1345 | } |
|
1345 | 1346 | } |
|
1346 | 1347 | *interesting.get_mut(¤t_seen).unwrap() -= 1; |
|
1347 | 1348 | if interesting[¤t_seen] == 0 { |
|
1348 | 1349 | interesting.remove(¤t_seen); |
|
1349 | 1350 | } |
|
1350 | 1351 | |
|
1351 | 1352 | current_rev = Revision(current_rev.0 - 1); |
|
1352 | 1353 | } |
|
1353 | 1354 | |
|
1354 | 1355 | if interesting.len() != 1 { |
|
1355 | 1356 | return Ok(vec![]); |
|
1356 | 1357 | } |
|
1357 | 1358 | let mask = interesting.keys().next().unwrap(); |
|
1358 | 1359 | |
|
1359 | 1360 | Ok(mapping |
|
1360 | 1361 | .into_iter() |
|
1361 | 1362 | .filter_map(|(shift, rev)| { |
|
1362 | 1363 | if (mask & shift) != 0 { |
|
1363 | 1364 | return Some(rev); |
|
1364 | 1365 | } |
|
1365 | 1366 | None |
|
1366 | 1367 | }) |
|
1367 | 1368 | .collect()) |
|
1368 | 1369 | } |
|
1369 | 1370 | } |
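
The routine above gives each input revision its own bit (`shift = 1 << idx`), then sweeps from the highest revision down, propagating the union of those bits to parents through `seen` while `interesting` counts how many revisions currently carry each distinct mask. A minimal standalone sketch of the bit-assignment step, using plain integers rather than this module's `Revision` type:

    // Give each input revision its own bit, as the setup loop above does.
    fn input_masks(revs: &[i32]) -> Vec<(u64, i32)> {
        revs.iter()
            .enumerate()
            .map(|(idx, &rev)| (1u64 << idx, rev))
            .collect()
    }

    fn main() {
        // Three input revisions get masks 0b001, 0b010 and 0b100.
        let mapping = input_masks(&[4, 7, 9]);
        assert_eq!(mapping, vec![(1, 4), (2, 7), (4, 9)]);
        // A revision whose `seen` mask accumulates 0b111 is reachable
        // from all three inputs, i.e. a common-ancestor candidate.
    }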
|
1370 | 1371 | |
|
1371 | 1372 | /// The kind of functionality needed by find_gca_candidates |
|
1372 | 1373 | /// |
|
1373 | 1374 | /// This is a bit mask that can be declared "poisoned", which callers

1374 | 1375 | /// interpret as a signal to break out of some loops.

1375 | 1376 | ///

1376 | 1377 | /// The maximum capacity of the bit mask depends on the actual implementation
|
1377 | 1378 | trait PoisonableBitSet: Sized + PartialEq { |
|
1378 | 1379 | /// Return a vector of exactly `vec_len` elements, each initialized empty.

1379 | 1380 | ///

1380 | 1381 | /// The optimal representation depends heavily on the implementation: types

1381 | 1382 | /// that are `Copy` and have a constant capacity can typically use a very

1382 | 1383 | /// simple one.
|
1383 | 1384 | fn vec_of_empty(sets_size: usize, vec_len: usize) -> Vec<Self>; |
|
1384 | 1385 | |
|
1385 | 1386 | /// The size of the bit mask in memory |
|
1386 | 1387 | #[allow(unused)] |
|
1387 | 1388 | fn size(&self) -> usize; |
|
1388 | 1389 | |
|
1389 | 1390 | /// The number of elements that can be represented in the set. |
|
1390 | 1391 | /// |
|
1391 | 1392 | /// In other words, it is the smallest integer `C` such that

1392 | 1393 | /// the set is guaranteed to always be a subset of the integer range

1393 | 1394 | /// `[0, C)`
|
1394 | 1395 | #[allow(unused)] |
|
1395 | 1396 | fn capacity(&self) -> usize; |
|
1396 | 1397 | |
|
1397 | 1398 | /// Declare `n` to belong to the set |
|
1398 | 1399 | fn add(&mut self, n: usize); |
|
1399 | 1400 | |
|
1400 | 1401 | /// Declare `n` not to belong to the set |
|
1401 | 1402 | #[allow(unused)] |
|
1402 | 1403 | fn discard(&mut self, n: usize); |
|
1403 | 1404 | |
|
1404 | 1405 | /// Replace this bit set by its union with other |
|
1405 | 1406 | fn union(&mut self, other: &Self); |
|
1406 | 1407 | |
|
1407 | 1408 | /// Poison the bit set |
|
1408 | 1409 | /// |
|
1409 | 1410 | /// Interpretation up to the caller |
|
1410 | 1411 | fn poison(&mut self); |
|
1411 | 1412 | |
|
1412 | 1413 | /// Is the bit set poisoned? |
|
1413 | 1414 | /// |
|
1414 | 1415 | /// Interpretation is up to the caller |
|
1415 | 1416 | fn is_poisoned(&self) -> bool; |
|
1416 | 1417 | |
|
1417 | 1418 | /// Is the bit set empty? |
|
1418 | 1419 | fn is_empty(&self) -> bool; |
|
1419 | 1420 | |
|
1420 | 1421 | /// Return `true` if and only if the bit set is the full range `[0, n)`

1421 | 1422 | /// of integers
|
1422 | 1423 | fn is_full_range(&self, n: usize) -> bool; |
|
1423 | 1424 | } |
|
1424 | 1425 | |
|
1425 | 1426 | const U64_POISON: u64 = 1 << 63; |
|
1426 | 1427 | const U8_POISON: u8 = 1 << 7; |
|
1427 | 1428 | |
|
1428 | 1429 | impl PoisonableBitSet for u64 { |
|
1429 | 1430 | fn vec_of_empty(_sets_size: usize, vec_len: usize) -> Vec<Self> { |
|
1430 | 1431 | vec![0u64; vec_len] |
|
1431 | 1432 | } |
|
1432 | 1433 | |
|
1433 | 1434 | fn size(&self) -> usize { |
|
1434 | 1435 | 8 |
|
1435 | 1436 | } |
|
1436 | 1437 | |
|
1437 | 1438 | fn capacity(&self) -> usize { |
|
1438 | 1439 | 63 |
|
1439 | 1440 | } |
|
1440 | 1441 | |
|
1441 | 1442 | fn add(&mut self, n: usize) { |
|
1442 | 1443 | (*self) |= 1u64 << n; |
|
1443 | 1444 | } |
|
1444 | 1445 | |
|
1445 | 1446 | fn discard(&mut self, n: usize) { |
|
1446 | 1447 | (*self) &= u64::MAX - (1u64 << n); |
|
1447 | 1448 | } |
|
1448 | 1449 | |
|
1449 | 1450 | fn union(&mut self, other: &Self) { |
|
1450 | 1451 | if *self != *other { |
|
1451 | 1452 | (*self) |= *other; |
|
1452 | 1453 | } |
|
1453 | 1454 | } |
|
1454 | 1455 | |
|
1455 | 1456 | fn is_full_range(&self, n: usize) -> bool { |
|
1456 | 1457 | *self + 1 == (1u64 << n) |
|
1457 | 1458 | } |
|
1458 | 1459 | |
|
1459 | 1460 | fn is_empty(&self) -> bool { |
|
1460 | 1461 | *self == 0 |
|
1461 | 1462 | } |
|
1462 | 1463 | |
|
1463 | 1464 | fn poison(&mut self) { |
|
1464 | 1465 | *self = U64_POISON; |
|
1465 | 1466 | } |
|
1466 | 1467 | |
|
1467 | 1468 | fn is_poisoned(&self) -> bool { |
|
1468 | 1469 | // An equality comparison would be tempting, but would not survive

1469 | 1470 | // further operations after poisoning (even though these should be bogus).
|
1470 | 1471 | *self >= U64_POISON |
|
1471 | 1472 | } |
|
1472 | 1473 | } |
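
To see why `is_poisoned` uses `>=` rather than `==`: once bit 63 is set, later (bogus) `add` or `union` calls can only set more bits, so the value never drops below `U64_POISON`. A minimal sketch with plain `u64` operations:

    const U64_POISON: u64 = 1 << 63;

    fn main() {
        let mut set: u64 = U64_POISON; // state right after poison()
        set |= 1 << 5;                 // a bogus add() after poisoning
        assert_ne!(set, U64_POISON);   // an equality check would now miss it...
        assert!(set >= U64_POISON);    // ...but the `>=` check still detects it
    }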
|
1473 | 1474 | |
|
1474 | 1475 | impl PoisonableBitSet for u8 { |
|
1475 | 1476 | fn vec_of_empty(_sets_size: usize, vec_len: usize) -> Vec<Self> { |
|
1476 | 1477 | vec![0; vec_len] |
|
1477 | 1478 | } |
|
1478 | 1479 | |
|
1479 | 1480 | fn size(&self) -> usize { |
|
1480 | 1481 | 1 |
|
1481 | 1482 | } |
|
1482 | 1483 | |
|
1483 | 1484 | fn capacity(&self) -> usize { |
|
1484 | 1485 | 7 |
|
1485 | 1486 | } |
|
1486 | 1487 | |
|
1487 | 1488 | fn add(&mut self, n: usize) { |
|
1488 | 1489 | (*self) |= 1 << n; |
|
1489 | 1490 | } |
|
1490 | 1491 | |
|
1491 | 1492 | fn discard(&mut self, n: usize) { |
|
1492 | 1493 | (*self) &= u8::MAX - (1 << n); |
|
1493 | 1494 | } |
|
1494 | 1495 | |
|
1495 | 1496 | fn union(&mut self, other: &Self) { |
|
1496 | 1497 | if *self != *other { |
|
1497 | 1498 | (*self) |= *other; |
|
1498 | 1499 | } |
|
1499 | 1500 | } |
|
1500 | 1501 | |
|
1501 | 1502 | fn is_full_range(&self, n: usize) -> bool { |
|
1502 | 1503 | *self + 1 == (1 << n) |
|
1503 | 1504 | } |
|
1504 | 1505 | |
|
1505 | 1506 | fn is_empty(&self) -> bool { |
|
1506 | 1507 | *self == 0 |
|
1507 | 1508 | } |
|
1508 | 1509 | |
|
1509 | 1510 | fn poison(&mut self) { |
|
1510 | 1511 | *self = U8_POISON; |
|
1511 | 1512 | } |
|
1512 | 1513 | |
|
1513 | 1514 | fn is_poisoned(&self) -> bool { |
|
1514 | 1515 | // An equality comparison would be tempting, but would not survive

1515 | 1516 | // further operations after poisoning (even though these should be bogus).
|
1516 | 1517 | *self >= U8_POISON |
|
1517 | 1518 | } |
|
1518 | 1519 | } |
|
1519 | 1520 | |
|
1520 | 1521 | /// A poisonable bit set whose capacity is not known at compile time but |
|
1521 | 1522 | /// is constant after initial construction |
|
1522 | 1523 | /// |
|
1523 | 1524 | /// This could be optimized much further if performance assessments (speed

1524 | 1525 | /// and/or RAM) require it.
|
1525 | 1526 | /// As far as RAM is concerned, for large vectors of these, the main problem |
|
1526 | 1527 | /// would be the repetition of set_size in each item. We would need a trait |
|
1527 | 1528 | /// to abstract over the idea of a vector of such bit sets to do better. |
|
1528 | 1529 | #[derive(Clone, PartialEq)] |
|
1529 | 1530 | struct NonStaticPoisonableBitSet { |
|
1530 | 1531 | set_size: usize, |
|
1531 | 1532 | bit_set: Vec<u64>, |
|
1532 | 1533 | } |
|
1533 | 1534 | |
|
1534 | 1535 | /// Number of `u64` needed for a [`NonStaticPoisonableBitSet`] of given size |
|
1535 | 1536 | fn non_static_poisonable_inner_len(set_size: usize) -> usize { |
|
1536 | 1537 | 1 + (set_size + 1) / 64 |
|
1537 | 1538 | } |
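
The `+ 1` reserves room for the poison bit at position `set_size`; a 64-element set, for instance, needs a second word solely for that bit. A couple of worked values for the formula above:

    fn non_static_poisonable_inner_len(set_size: usize) -> usize {
        1 + (set_size + 1) / 64
    }

    fn main() {
        assert_eq!(non_static_poisonable_inner_len(10), 1);  // bits 0..=10 fit in one u64
        assert_eq!(non_static_poisonable_inner_len(64), 2);  // poison bit 64 lands in word 1
        assert_eq!(non_static_poisonable_inner_len(128), 3); // poison bit 128 lands in word 2
    }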
|
1538 | 1539 | |
|
1539 | 1540 | impl NonStaticPoisonableBitSet { |
|
1540 | 1541 | /// The index of the sub-bit set for the given n, and the index inside |
|
1541 | 1542 | /// the latter |
|
1542 | 1543 | fn index(&self, n: usize) -> (usize, usize) { |
|
1543 | 1544 | (n / 64, n % 64) |
|
1544 | 1545 | } |
|
1545 | 1546 | } |
|
1546 | 1547 | |
|
1547 | 1548 | /// Mock implementation to ensure that the trait makes sense |
|
1548 | 1549 | impl PoisonableBitSet for NonStaticPoisonableBitSet { |
|
1549 | 1550 | fn vec_of_empty(set_size: usize, vec_len: usize) -> Vec<Self> { |
|
1550 | 1551 | let tmpl = Self { |
|
1551 | 1552 | set_size, |
|
1552 | 1553 | bit_set: vec![0u64; non_static_poisonable_inner_len(set_size)], |
|
1553 | 1554 | }; |
|
1554 | 1555 | vec![tmpl; vec_len] |
|
1555 | 1556 | } |
|
1556 | 1557 | |
|
1557 | 1558 | fn size(&self) -> usize { |
|
1558 | 1559 | 8 + self.bit_set.len() * 8 |
|
1559 | 1560 | } |
|
1560 | 1561 | |
|
1561 | 1562 | fn capacity(&self) -> usize { |
|
1562 | 1563 | self.set_size |
|
1563 | 1564 | } |
|
1564 | 1565 | |
|
1565 | 1566 | fn add(&mut self, n: usize) { |
|
1566 | 1567 | let (sub_bs, bit_pos) = self.index(n); |
|
1567 | 1568 | self.bit_set[sub_bs] |= 1 << bit_pos |
|
1568 | 1569 | } |
|
1569 | 1570 | |
|
1570 | 1571 | fn discard(&mut self, n: usize) { |
|
1571 | 1572 | let (sub_bs, bit_pos) = self.index(n); |
|
1572 | 1573 | self.bit_set[sub_bs] &= u64::MAX - (1 << bit_pos)
|
1573 | 1574 | } |
|
1574 | 1575 | |
|
1575 | 1576 | fn union(&mut self, other: &Self) { |
|
1576 | 1577 | assert!( |
|
1577 | 1578 | self.set_size == other.set_size, |
|
1578 | 1579 | "Binary operations on bit sets can only be done on same size" |
|
1579 | 1580 | ); |
|
1580 | 1581 | for i in 0..self.bit_set.len() {
|
1581 | 1582 | self.bit_set[i] |= other.bit_set[i] |
|
1582 | 1583 | } |
|
1583 | 1584 | } |
|
1584 | 1585 | |
|
1585 | 1586 | fn is_full_range(&self, n: usize) -> bool { |
|
1586 | 1587 | let (sub_bs, bit_pos) = self.index(n); |
|
1587 | 1588 | self.bit_set[..sub_bs].iter().all(|bs| *bs == u64::MAX) |
|
1588 | 1589 | && self.bit_set[sub_bs] == (1 << bit_pos) - 1
|
1589 | 1590 | } |
|
1590 | 1591 | |
|
1591 | 1592 | fn is_empty(&self) -> bool { |
|
1592 | 1593 | self.bit_set.iter().all(|bs| *bs == 0u64) |
|
1593 | 1594 | } |
|
1594 | 1595 | |
|
1595 | 1596 | fn poison(&mut self) { |
|
1596 | 1597 | let (sub_bs, bit_pos) = self.index(self.set_size); |
|
1597 | 1598 | self.bit_set[sub_bs] = 1 << bit_pos; |
|
1598 | 1599 | } |
|
1599 | 1600 | |
|
1600 | 1601 | fn is_poisoned(&self) -> bool { |
|
1601 | 1602 | let (sub_bs, bit_pos) = self.index(self.set_size); |
|
1602 | 1603 | self.bit_set[sub_bs] >= 1 << bit_pos |
|
1603 | 1604 | } |
|
1604 | 1605 | } |
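
Since the struct and its trait are private, exercising them has to happen inside this module (for instance as a test next to the others below). A minimal test sketch for a set wider than one `u64`, assuming the corrected `union` and `discard` above:

    #[test]
    fn non_static_bit_set_spans_words() {
        // 100 possible elements: the backing Vec<u64> spans several words.
        let mut sets = NonStaticPoisonableBitSet::vec_of_empty(100, 2);
        sets[0].add(3);  // lands in word 0
        sets[1].add(70); // lands in word 1
        let other = sets[1].clone(); // clone to satisfy the borrow checker
        sets[0].union(&other);       // bit 70 must survive the union
        assert!(!sets[0].is_empty());
        sets[0].discard(3);          // only bit 70 should remain
        assert!(!sets[0].is_empty());
        assert!(!sets[0].is_poisoned());
        sets[0].poison();
        assert!(sets[0].is_poisoned());
    }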
|
1605 | 1606 | |
|
1606 | 1607 | /// Set of roots of all non-public phases |
|
1607 | 1608 | pub type RootsPerPhase = [Vec<Revision>; Phase::non_public_phases().len()]; |
|
1608 | 1609 | |
|
1609 | 1610 | #[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash)] |
|
1610 | 1611 | pub enum Phase { |
|
1611 | 1612 | Public = 0, |
|
1612 | 1613 | Draft = 1, |
|
1613 | 1614 | Secret = 2, |
|
1614 | 1615 | Archived = 3, |
|
1615 | 1616 | Internal = 4, |
|
1616 | 1617 | } |
|
1617 | 1618 | |
|
1618 | 1619 | impl TryFrom<usize> for Phase { |
|
1619 | 1620 | type Error = RevlogError; |
|
1620 | 1621 | |
|
1621 | 1622 | fn try_from(value: usize) -> Result<Self, Self::Error> { |
|
1622 | 1623 | Ok(match value { |
|
1623 | 1624 | 0 => Self::Public, |
|
1624 | 1625 | 1 => Self::Draft, |
|
1625 | 1626 | 2 => Self::Secret, |
|
1626 | 1627 | 32 => Self::Archived, |
|
1627 | 1628 | 96 => Self::Internal, |
|
1628 | 1629 | v => { |
|
1629 | 1630 | return Err(RevlogError::corrupted(format!( |
|
1630 | 1631 | "invalid phase value {}", |
|
1631 | 1632 | v |
|
1632 | 1633 | ))) |
|
1633 | 1634 | } |
|
1634 | 1635 | }) |
|
1635 | 1636 | } |
|
1636 | 1637 | } |
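
Note that the accepted values are Mercurial's on-disk phase numbers (`archived` is 32 and `internal` is 96 in Python), not the Rust enum discriminants. A hypothetical sketch of the conversion, assuming this module's `Phase`:

    assert_eq!(Phase::try_from(0usize).unwrap(), Phase::Public);
    assert_eq!(Phase::try_from(32usize).unwrap(), Phase::Archived);
    assert_eq!(Phase::try_from(96usize).unwrap(), Phase::Internal);
    assert!(Phase::try_from(3usize).is_err()); // 3 is the discriminant, not the disk value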
|
1637 | 1638 | |
|
1638 | 1639 | impl Phase { |
|
1639 | 1640 | pub const fn all_phases() -> &'static [Self] { |
|
1640 | 1641 | &[ |
|
1641 | 1642 | Self::Public, |
|
1642 | 1643 | Self::Draft, |
|
1643 | 1644 | Self::Secret, |
|
1644 | 1645 | Self::Archived, |
|
1645 | 1646 | Self::Internal, |
|
1646 | 1647 | ] |
|
1647 | 1648 | } |
|
1648 | 1649 | pub const fn non_public_phases() -> &'static [Self] { |
|
1649 | 1650 | &[Self::Draft, Self::Secret, Self::Archived, Self::Internal] |
|
1650 | 1651 | } |
|
1651 | 1652 | } |
|
1652 | 1653 | |
|
1653 | 1654 | fn inline_scan(bytes: &[u8]) -> (usize, Vec<usize>) { |
|
1654 | 1655 | let mut offset: usize = 0; |
|
1655 | 1656 | let mut offsets = Vec::new(); |
|
1656 | 1657 | |
|
1657 | 1658 | while offset + INDEX_ENTRY_SIZE <= bytes.len() { |
|
1658 | 1659 | offsets.push(offset); |
|
1659 | 1660 | let end = offset + INDEX_ENTRY_SIZE; |
|
1660 | 1661 | let entry = IndexEntry { |
|
1661 | 1662 | bytes: &bytes[offset..end], |
|
1662 | 1663 | }; |
|
1663 | 1664 | |
|
1664 | 1665 | offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize; |
|
1665 | 1666 | } |
|
1666 | 1667 | (offset, offsets) |
|
1667 | 1668 | } |
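
In an inline revlog every 64-byte index entry is immediately followed by its compressed data, so entry `i + 1` starts at `offset_i + INDEX_ENTRY_SIZE + compressed_len_i`. A standalone sketch of the same arithmetic on hypothetical compressed lengths:

    const INDEX_ENTRY_SIZE: usize = 64;

    // Mirror of the offset accumulation in inline_scan, on plain lengths.
    fn inline_offsets(compressed_lens: &[usize]) -> Vec<usize> {
        let mut offset = 0;
        let mut offsets = Vec::new();
        for len in compressed_lens {
            offsets.push(offset);
            offset += INDEX_ENTRY_SIZE + len;
        }
        offsets
    }

    fn main() {
        // Entries carrying 10, 0 and 200 bytes of inline data:
        assert_eq!(inline_offsets(&[10, 0, 200]), vec![0, 74, 138]);
    }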
|
1668 | 1669 | |
|
1669 | 1670 | impl super::RevlogIndex for Index { |
|
1670 | 1671 | fn len(&self) -> usize { |
|
1671 | 1672 | self.len() |
|
1672 | 1673 | } |
|
1673 | 1674 | |
|
1674 | 1675 | fn node(&self, rev: Revision) -> Option<&Node> { |
|
1675 | 1676 | if rev == NULL_REVISION { |
|
1676 | 1677 | return Some(&NULL_NODE); |
|
1677 | 1678 | } |
|
1678 | 1679 | self.get_entry(rev).map(|entry| entry.hash()) |
|
1679 | 1680 | } |
|
1680 | 1681 | } |
|
1681 | 1682 | |
|
1682 | 1683 | #[derive(Debug)] |
|
1683 | 1684 | pub struct IndexEntry<'a> { |
|
1684 | 1685 | bytes: &'a [u8], |
|
1685 | 1686 | } |
|
1686 | 1687 | |
|
1687 | 1688 | impl<'a> IndexEntry<'a> { |
|
1688 | 1689 | /// Return the offset of the data. |
|
1689 | 1690 | pub fn offset(&self) -> usize { |
|
1690 | 1691 | let mut bytes = [0; 8]; |
|
1691 | 1692 | bytes[2..8].copy_from_slice(&self.bytes[0..=5]); |
|
1692 | 1693 | BigEndian::read_u64(&bytes[..]) as usize |
|
1693 | 1694 | } |
|
1694 | 1695 | pub fn raw_offset(&self) -> u64 { |
|
1695 | 1696 | BigEndian::read_u64(&self.bytes[0..8]) |
|
1696 | 1697 | } |
|
1697 | 1698 | |
|
1698 | 1699 | /// Same result (except potentially for rev 0) as C `index_get_start()` |
|
1699 | 1700 | fn c_start(&self) -> u64 { |
|
1700 | 1701 | self.raw_offset() >> 16 |
|
1701 | 1702 | } |
|
1702 | 1703 | |
|
1703 | 1704 | pub fn flags(&self) -> u16 { |
|
1704 | 1705 | BigEndian::read_u16(&self.bytes[6..=7]) |
|
1705 | 1706 | } |
|
1706 | 1707 | |
|
1707 | 1708 | /// Return the compressed length of the data. |
|
1708 | 1709 | pub fn compressed_len(&self) -> u32 { |
|
1709 | 1710 | BigEndian::read_u32(&self.bytes[8..=11]) |
|
1710 | 1711 | } |
|
1711 | 1712 | |
|
1712 | 1713 | /// Return the uncompressed length of the data. |
|
1713 | 1714 | pub fn uncompressed_len(&self) -> i32 { |
|
1714 | 1715 | BigEndian::read_i32(&self.bytes[12..=15]) |
|
1715 | 1716 | } |
|
1716 | 1717 | |
|
1717 | 1718 | /// Return the revision from which the data has been derived.
|
1718 | 1719 | pub fn base_revision_or_base_of_delta_chain(&self) -> UncheckedRevision { |
|
1719 | 1720 | // TODO Maybe return an Option when base_revision == rev? |
|
1720 | 1721 | // Requires to add rev to IndexEntry |
|
1721 | 1722 | |
|
1722 | 1723 | BigEndian::read_i32(&self.bytes[16..]).into() |
|
1723 | 1724 | } |
|
1724 | 1725 | |
|
1725 | 1726 | pub fn link_revision(&self) -> UncheckedRevision { |
|
1726 | 1727 | BigEndian::read_i32(&self.bytes[20..]).into() |
|
1727 | 1728 | } |
|
1728 | 1729 | |
|
1729 | 1730 | pub fn p1(&self) -> UncheckedRevision { |
|
1730 | 1731 | BigEndian::read_i32(&self.bytes[24..]).into() |
|
1731 | 1732 | } |
|
1732 | 1733 | |
|
1733 | 1734 | pub fn p2(&self) -> UncheckedRevision { |
|
1734 | 1735 | BigEndian::read_i32(&self.bytes[28..]).into() |
|
1735 | 1736 | } |
|
1736 | 1737 | |
|
1737 | 1738 | /// Return the hash of the revision's full text.
|
1738 | 1739 | /// |
|
1739 | 1740 | /// Currently, SHA-1 is used and only the first 20 bytes of this field |
|
1740 | 1741 | /// are used. |
|
1741 | 1742 | pub fn hash(&self) -> &'a Node { |
|
1742 | 1743 | (&self.bytes[32..52]).try_into().unwrap() |
|
1743 | 1744 | } |
|
1744 | 1745 | |
|
1745 | 1746 | pub fn as_bytes(&self) -> &'a [u8] { |
|
1746 | 1747 | self.bytes |
|
1747 | 1748 | } |
|
1748 | 1749 | } |
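
The first 8 bytes of an entry pack the 48-bit data offset and the 16-bit flags: `offset()` widens bytes 0..6 into a `u64`, `flags()` reads bytes 6..8, and `c_start()` recovers the offset as `raw_offset() >> 16`. A sketch of that packing with hypothetical values, using the `byteorder` crate this module already imports:

    use byteorder::{BigEndian, ByteOrder};

    fn main() {
        let offset: u64 = 1; // 48-bit data offset
        let flags: u16 = 2;
        let raw = (offset << 16) | flags as u64;

        let mut first8 = [0u8; 8];
        BigEndian::write_u64(&mut first8, raw);

        // offset(): bytes 0..6 widened into the low 48 bits of a u64
        let mut widened = [0u8; 8];
        widened[2..8].copy_from_slice(&first8[0..6]);
        assert_eq!(BigEndian::read_u64(&widened), offset);

        // flags(): bytes 6..8
        assert_eq!(BigEndian::read_u16(&first8[6..8]), flags);

        // c_start(): raw_offset() >> 16
        assert_eq!(raw >> 16, offset);
    }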
|
1749 | 1750 | |
|
1750 | 1751 | #[cfg(test)] |
|
1751 | 1752 | pub use tests::IndexEntryBuilder; |
|
1752 | 1753 | |
|
1753 | 1754 | #[cfg(test)] |
|
1754 | 1755 | mod tests { |
|
1755 | 1756 | use super::*; |
|
1756 | 1757 | use crate::node::NULL_NODE; |
|
1757 | 1758 | |
|
1758 | 1759 | #[cfg(test)] |
|
1759 | 1760 | #[derive(Debug, Copy, Clone)] |
|
1760 | 1761 | pub struct IndexEntryBuilder { |
|
1761 | 1762 | is_first: bool, |
|
1762 | 1763 | is_inline: bool, |
|
1763 | 1764 | is_general_delta: bool, |
|
1764 | 1765 | version: u16, |
|
1765 | 1766 | offset: usize, |
|
1766 | 1767 | compressed_len: usize, |
|
1767 | 1768 | uncompressed_len: usize, |
|
1768 | 1769 | base_revision_or_base_of_delta_chain: Revision, |
|
1769 | 1770 | link_revision: Revision, |
|
1770 | 1771 | p1: Revision, |
|
1771 | 1772 | p2: Revision, |
|
1772 | 1773 | node: Node, |
|
1773 | 1774 | } |
|
1774 | 1775 | |
|
1775 | 1776 | #[cfg(test)] |
|
1776 | 1777 | impl IndexEntryBuilder { |
|
1777 | 1778 | #[allow(clippy::new_without_default)] |
|
1778 | 1779 | pub fn new() -> Self { |
|
1779 | 1780 | Self { |
|
1780 | 1781 | is_first: false, |
|
1781 | 1782 | is_inline: false, |
|
1782 | 1783 | is_general_delta: true, |
|
1783 | 1784 | version: 1, |
|
1784 | 1785 | offset: 0, |
|
1785 | 1786 | compressed_len: 0, |
|
1786 | 1787 | uncompressed_len: 0, |
|
1787 | 1788 | base_revision_or_base_of_delta_chain: Revision(0), |
|
1788 | 1789 | link_revision: Revision(0), |
|
1789 | 1790 | p1: NULL_REVISION, |
|
1790 | 1791 | p2: NULL_REVISION, |
|
1791 | 1792 | node: NULL_NODE, |
|
1792 | 1793 | } |
|
1793 | 1794 | } |
|
1794 | 1795 | |
|
1795 | 1796 | pub fn is_first(&mut self, value: bool) -> &mut Self { |
|
1796 | 1797 | self.is_first = value; |
|
1797 | 1798 | self |
|
1798 | 1799 | } |
|
1799 | 1800 | |
|
1800 | 1801 | pub fn with_inline(&mut self, value: bool) -> &mut Self { |
|
1801 | 1802 | self.is_inline = value; |
|
1802 | 1803 | self |
|
1803 | 1804 | } |
|
1804 | 1805 | |
|
1805 | 1806 | pub fn with_general_delta(&mut self, value: bool) -> &mut Self { |
|
1806 | 1807 | self.is_general_delta = value; |
|
1807 | 1808 | self |
|
1808 | 1809 | } |
|
1809 | 1810 | |
|
1810 | 1811 | pub fn with_version(&mut self, value: u16) -> &mut Self { |
|
1811 | 1812 | self.version = value; |
|
1812 | 1813 | self |
|
1813 | 1814 | } |
|
1814 | 1815 | |
|
1815 | 1816 | pub fn with_offset(&mut self, value: usize) -> &mut Self { |
|
1816 | 1817 | self.offset = value; |
|
1817 | 1818 | self |
|
1818 | 1819 | } |
|
1819 | 1820 | |
|
1820 | 1821 | pub fn with_compressed_len(&mut self, value: usize) -> &mut Self { |
|
1821 | 1822 | self.compressed_len = value; |
|
1822 | 1823 | self |
|
1823 | 1824 | } |
|
1824 | 1825 | |
|
1825 | 1826 | pub fn with_uncompressed_len(&mut self, value: usize) -> &mut Self { |
|
1826 | 1827 | self.uncompressed_len = value; |
|
1827 | 1828 | self |
|
1828 | 1829 | } |
|
1829 | 1830 | |
|
1830 | 1831 | pub fn with_base_revision_or_base_of_delta_chain( |
|
1831 | 1832 | &mut self, |
|
1832 | 1833 | value: Revision, |
|
1833 | 1834 | ) -> &mut Self { |
|
1834 | 1835 | self.base_revision_or_base_of_delta_chain = value; |
|
1835 | 1836 | self |
|
1836 | 1837 | } |
|
1837 | 1838 | |
|
1838 | 1839 | pub fn with_link_revision(&mut self, value: Revision) -> &mut Self { |
|
1839 | 1840 | self.link_revision = value; |
|
1840 | 1841 | self |
|
1841 | 1842 | } |
|
1842 | 1843 | |
|
1843 | 1844 | pub fn with_p1(&mut self, value: Revision) -> &mut Self { |
|
1844 | 1845 | self.p1 = value; |
|
1845 | 1846 | self |
|
1846 | 1847 | } |
|
1847 | 1848 | |
|
1848 | 1849 | pub fn with_p2(&mut self, value: Revision) -> &mut Self { |
|
1849 | 1850 | self.p2 = value; |
|
1850 | 1851 | self |
|
1851 | 1852 | } |
|
1852 | 1853 | |
|
1853 | 1854 | pub fn with_node(&mut self, value: Node) -> &mut Self { |
|
1854 | 1855 | self.node = value; |
|
1855 | 1856 | self |
|
1856 | 1857 | } |
|
1857 | 1858 | |
|
1858 | 1859 | pub fn build(&self) -> Vec<u8> { |
|
1859 | 1860 | let mut bytes = Vec::with_capacity(INDEX_ENTRY_SIZE); |
|
1860 | 1861 | if self.is_first { |
|
1861 | 1862 | bytes.extend(match (self.is_general_delta, self.is_inline) { |
|
1862 | 1863 | (false, false) => [0u8, 0], |
|
1863 | 1864 | (false, true) => [0u8, 1], |
|
1864 | 1865 | (true, false) => [0u8, 2], |
|
1865 | 1866 | (true, true) => [0u8, 3], |
|
1866 | 1867 | }); |
|
1867 | 1868 | bytes.extend(self.version.to_be_bytes()); |
|
1868 | 1869 | // Remaining offset bytes. |
|
1869 | 1870 | bytes.extend([0u8; 2]); |
|
1870 | 1871 | } else { |
|
1871 | 1872 | // Offset stored on 48 bits (6 bytes) |
|
1872 | 1873 | bytes.extend(&(self.offset as u64).to_be_bytes()[2..]); |
|
1873 | 1874 | } |
|
1874 | 1875 | bytes.extend([0u8; 2]); // Revision flags. |
|
1875 | 1876 | bytes.extend((self.compressed_len as u32).to_be_bytes()); |
|
1876 | 1877 | bytes.extend((self.uncompressed_len as u32).to_be_bytes()); |
|
1877 | 1878 | bytes.extend( |
|
1878 | 1879 | self.base_revision_or_base_of_delta_chain.0.to_be_bytes(), |
|
1879 | 1880 | ); |
|
1880 | 1881 | bytes.extend(self.link_revision.0.to_be_bytes()); |
|
1881 | 1882 | bytes.extend(self.p1.0.to_be_bytes()); |
|
1882 | 1883 | bytes.extend(self.p2.0.to_be_bytes()); |
|
1883 | 1884 | bytes.extend(self.node.as_bytes()); |
|
1884 | 1885 | bytes.extend(vec![0u8; 12]); |
|
1885 | 1886 | bytes |
|
1886 | 1887 | } |
|
1887 | 1888 | } |
|
1888 | 1889 | |
|
1889 | 1890 | pub fn is_inline(index_bytes: &[u8]) -> bool { |
|
1890 | 1891 | IndexHeader::parse(index_bytes) |
|
1891 | 1892 | .expect("too short") |
|
1892 | 1893 | .unwrap() |
|
1893 | 1894 | .format_flags() |
|
1894 | 1895 | .is_inline() |
|
1895 | 1896 | } |
|
1896 | 1897 | |
|
1897 | 1898 | pub fn uses_generaldelta(index_bytes: &[u8]) -> bool { |
|
1898 | 1899 | IndexHeader::parse(index_bytes) |
|
1899 | 1900 | .expect("too short") |
|
1900 | 1901 | .unwrap() |
|
1901 | 1902 | .format_flags() |
|
1902 | 1903 | .uses_generaldelta() |
|
1903 | 1904 | } |
|
1904 | 1905 | |
|
1905 | 1906 | pub fn get_version(index_bytes: &[u8]) -> u16 { |
|
1906 | 1907 | IndexHeader::parse(index_bytes) |
|
1907 | 1908 | .expect("too short") |
|
1908 | 1909 | .unwrap() |
|
1909 | 1910 | .format_version() |
|
1910 | 1911 | } |
|
1911 | 1912 | |
|
1912 | 1913 | #[test] |
|
1913 | 1914 | fn flags_when_no_inline_flag_test() { |
|
1914 | 1915 | let bytes = IndexEntryBuilder::new() |
|
1915 | 1916 | .is_first(true) |
|
1916 | 1917 | .with_general_delta(false) |
|
1917 | 1918 | .with_inline(false) |
|
1918 | 1919 | .build(); |
|
1919 | 1920 | |
|
1920 | 1921 | assert!(!is_inline(&bytes)); |
|
1921 | 1922 | assert!(!uses_generaldelta(&bytes)); |
|
1922 | 1923 | } |
|
1923 | 1924 | |
|
1924 | 1925 | #[test] |
|
1925 | 1926 | fn flags_when_inline_flag_test() { |
|
1926 | 1927 | let bytes = IndexEntryBuilder::new() |
|
1927 | 1928 | .is_first(true) |
|
1928 | 1929 | .with_general_delta(false) |
|
1929 | 1930 | .with_inline(true) |
|
1930 | 1931 | .build(); |
|
1931 | 1932 | |
|
1932 | 1933 | assert!(is_inline(&bytes)); |
|
1933 | 1934 | assert!(!uses_generaldelta(&bytes)); |
|
1934 | 1935 | } |
|
1935 | 1936 | |
|
1936 | 1937 | #[test] |
|
1937 | 1938 | fn flags_when_inline_and_generaldelta_flags_test() { |
|
1938 | 1939 | let bytes = IndexEntryBuilder::new() |
|
1939 | 1940 | .is_first(true) |
|
1940 | 1941 | .with_general_delta(true) |
|
1941 | 1942 | .with_inline(true) |
|
1942 | 1943 | .build(); |
|
1943 | 1944 | |
|
1944 | 1945 | assert!(is_inline(&bytes)); |
|
1945 | 1946 | assert!(uses_generaldelta(&bytes)); |
|
1946 | 1947 | } |
|
1947 | 1948 | |
|
1948 | 1949 | #[test] |
|
1949 | 1950 | fn test_offset() { |
|
1950 | 1951 | let bytes = IndexEntryBuilder::new().with_offset(1).build(); |
|
1951 | 1952 | let entry = IndexEntry { bytes: &bytes }; |
|
1952 | 1953 | |
|
1953 | 1954 | assert_eq!(entry.offset(), 1) |
|
1954 | 1955 | } |
|
1955 | 1956 | |
|
1956 | 1957 | #[test] |
|
1957 | 1958 | fn test_compressed_len() { |
|
1958 | 1959 | let bytes = IndexEntryBuilder::new().with_compressed_len(1).build(); |
|
1959 | 1960 | let entry = IndexEntry { bytes: &bytes }; |
|
1960 | 1961 | |
|
1961 | 1962 | assert_eq!(entry.compressed_len(), 1) |
|
1962 | 1963 | } |
|
1963 | 1964 | |
|
1964 | 1965 | #[test] |
|
1965 | 1966 | fn test_uncompressed_len() { |
|
1966 | 1967 | let bytes = IndexEntryBuilder::new().with_uncompressed_len(1).build(); |
|
1967 | 1968 | let entry = IndexEntry { bytes: &bytes }; |
|
1968 | 1969 | |
|
1969 | 1970 | assert_eq!(entry.uncompressed_len(), 1) |
|
1970 | 1971 | } |
|
1971 | 1972 | |
|
1972 | 1973 | #[test] |
|
1973 | 1974 | fn test_base_revision_or_base_of_delta_chain() { |
|
1974 | 1975 | let bytes = IndexEntryBuilder::new() |
|
1975 | 1976 | .with_base_revision_or_base_of_delta_chain(Revision(1)) |
|
1976 | 1977 | .build(); |
|
1977 | 1978 | let entry = IndexEntry { bytes: &bytes }; |
|
1978 | 1979 | |
|
1979 | 1980 | assert_eq!(entry.base_revision_or_base_of_delta_chain(), 1.into()) |
|
1980 | 1981 | } |
|
1981 | 1982 | |
|
1982 | 1983 | #[test] |
|
1983 | 1984 | fn link_revision_test() { |
|
1984 | 1985 | let bytes = IndexEntryBuilder::new() |
|
1985 | 1986 | .with_link_revision(Revision(123)) |
|
1986 | 1987 | .build(); |
|
1987 | 1988 | |
|
1988 | 1989 | let entry = IndexEntry { bytes: &bytes }; |
|
1989 | 1990 | |
|
1990 | 1991 | assert_eq!(entry.link_revision(), 123.into()); |
|
1991 | 1992 | } |
|
1992 | 1993 | |
|
1993 | 1994 | #[test] |
|
1994 | 1995 | fn p1_test() { |
|
1995 | 1996 | let bytes = IndexEntryBuilder::new().with_p1(Revision(123)).build(); |
|
1996 | 1997 | |
|
1997 | 1998 | let entry = IndexEntry { bytes: &bytes }; |
|
1998 | 1999 | |
|
1999 | 2000 | assert_eq!(entry.p1(), 123.into()); |
|
2000 | 2001 | } |
|
2001 | 2002 | |
|
2002 | 2003 | #[test] |
|
2003 | 2004 | fn p2_test() { |
|
2004 | 2005 | let bytes = IndexEntryBuilder::new().with_p2(Revision(123)).build(); |
|
2005 | 2006 | |
|
2006 | 2007 | let entry = IndexEntry { bytes: &bytes }; |
|
2007 | 2008 | |
|
2008 | 2009 | assert_eq!(entry.p2(), 123.into()); |
|
2009 | 2010 | } |
|
2010 | 2011 | |
|
2011 | 2012 | #[test] |
|
2012 | 2013 | fn node_test() { |
|
2013 | 2014 | let node = Node::from_hex("0123456789012345678901234567890123456789") |
|
2014 | 2015 | .unwrap(); |
|
2015 | 2016 | let bytes = IndexEntryBuilder::new().with_node(node).build(); |
|
2016 | 2017 | |
|
2017 | 2018 | let entry = IndexEntry { bytes: &bytes }; |
|
2018 | 2019 | |
|
2019 | 2020 | assert_eq!(*entry.hash(), node); |
|
2020 | 2021 | } |
|
2021 | 2022 | |
|
2022 | 2023 | #[test] |
|
2023 | 2024 | fn version_test() { |
|
2024 | 2025 | let bytes = IndexEntryBuilder::new() |
|
2025 | 2026 | .is_first(true) |
|
2026 | 2027 | .with_version(2) |
|
2027 | 2028 | .build(); |
|
2028 | 2029 | |
|
2029 | 2030 | assert_eq!(get_version(&bytes), 2) |
|
2030 | 2031 | } |
|
2031 | 2032 | } |