@@ -1,5 +1,6 @@
 use std::fmt::Debug;
 use std::ops::Deref;
+use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
 
 use byteorder::{BigEndian, ByteOrder};
 use bytes_cast::{unaligned, BytesCast};
@@ -225,8 +226,9 @@ pub struct Index {
     bytes: IndexData,
     /// Offsets of starts of index blocks.
     /// Only needed when the index is interleaved with data.
-    offsets: Option<Vec<usize>>,
+    offsets: RwLock<Option<Vec<usize>>>,
     uses_generaldelta: bool,
+    is_inline: bool,
 }
 
 impl Debug for Index {
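
This hunk is the heart of the change: the offsets cache moves behind a `RwLock` so it can be dropped and lazily rebuilt through a shared reference, and a new `is_inline` field records explicitly what `offsets.is_some()` used to imply. A minimal, runnable sketch of the same interior-mutability caching pattern, with hypothetical names that are not part of the patch:

use std::sync::{RwLock, RwLockReadGuard};

/// Stand-in for `Index`: some raw data plus a lazily computed cache.
struct Cached {
    data: Vec<u8>,
    // `None` means "not computed yet"; the RwLock allows populating
    // the cache from methods that only hold `&self`.
    cache: RwLock<Option<Vec<usize>>>,
}

impl Cached {
    fn get_cache(&self) -> RwLockReadGuard<'_, Option<Vec<usize>>> {
        {
            // Populate under the write lock, then drop it before reading.
            let mut guard = self.cache.write().unwrap();
            if guard.is_none() {
                *guard = Some(self.data.iter().map(|&b| b as usize).collect());
            }
        }
        self.cache.read().unwrap()
    }

    fn clear_caches(&mut self) {
        // With exclusive access, no locking ceremony is needed:
        // `get_mut` reaches the value directly.
        *self.cache.get_mut().unwrap() = None;
    }
}

fn main() {
    let mut c = Cached { data: vec![1, 2, 3], cache: RwLock::new(None) };
    assert_eq!(c.get_cache().as_ref().map(Vec::len), Some(3)); // computed lazily
    c.clear_caches();
    assert_eq!(c.get_cache().as_ref().map(Vec::len), Some(3)); // recomputed
}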
@@ -294,8 +296,9 @@ impl Index {
         if offset == bytes.len() {
             Ok(Self {
                 bytes: IndexData::new(bytes),
-                offsets: Some(offsets),
+                offsets: RwLock::new(Some(offsets)),
                 uses_generaldelta,
+                is_inline: true,
             })
         } else {
             Err(HgError::corrupted("unexpected inline revlog length"))
@@ -303,8 +306,9 @@ impl Index {
         } else {
             Ok(Self {
                 bytes: IndexData::new(bytes),
-                offsets: None,
+                offsets: RwLock::new(None),
                 uses_generaldelta,
+                is_inline: false,
             })
         }
     }
@@ -315,7 +319,7 @@ impl Index {
 
     /// Value of the inline flag.
     pub fn is_inline(&self) -> bool {
-        self.offsets.is_some()
+        self.is_inline
     }
 
     /// Return a slice of bytes if `revlog` is inline. Panic if not.
@@ -328,13 +332,35 @@ impl Index {
 
     /// Return number of entries of the revlog index.
     pub fn len(&self) -> usize {
-        if let Some(offsets) = &self.offsets {
+        if let Some(offsets) = &*self.get_offsets() {
             offsets.len()
         } else {
             self.bytes.len() / INDEX_ENTRY_SIZE
         }
     }
 
+    pub fn get_offsets(&self) -> RwLockReadGuard<Option<Vec<usize>>> {
+        if self.is_inline() {
+            {
+                // Wrap in a block to drop the write guard
+                // TODO perf?
+                let mut offsets = self.offsets.write().unwrap();
+                if offsets.is_none() {
+                    offsets.replace(inline_scan(&self.bytes.bytes).1);
+                }
+            }
+        }
+        self.offsets.read().unwrap()
+    }
+
+    pub fn get_offsets_mut(&mut self) -> RwLockWriteGuard<Option<Vec<usize>>> {
+        let mut offsets = self.offsets.write().unwrap();
+        if self.is_inline() && offsets.is_none() {
+            offsets.replace(inline_scan(&self.bytes.bytes).1);
+        }
+        offsets
+    }
+
     /// Returns `true` if the `Index` has zero `entries`.
     pub fn is_empty(&self) -> bool {
         self.len() == 0
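
Two details of `get_offsets` are worth calling out. The inner block exists so the write guard is dropped before `self.offsets.read()` runs: `std::sync::RwLock` is not reentrant, and holding both guards on one thread would deadlock. The `TODO perf?` presumably refers to the write lock being taken on every call, even once the cache is populated. A self-contained sketch of just this locking discipline, with a vector literal standing in for `inline_scan`:

use std::sync::RwLock;

fn read_after_populate(cell: &RwLock<Option<Vec<usize>>>) -> usize {
    {
        // Exclusive access to populate the cache if needed.
        let mut w = cell.write().unwrap();
        if w.is_none() {
            *w = Some(vec![0, 64, 128]); // stand-in for inline_scan()
        }
    } // the write guard must die here, or read() below would deadlock
    // No `&self` path ever resets the Option back to None, so the value
    // observed under the read lock is guaranteed to be Some.
    cell.read().unwrap().as_ref().map(Vec::len).unwrap_or(0)
}

fn main() {
    let cell = RwLock::new(None);
    assert_eq!(read_after_populate(&cell), 3); // computed on first call
    assert_eq!(read_after_populate(&cell), 3); // reused on the second
}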
@@ -346,8 +372,8 @@ impl Index {
         if rev == NULL_REVISION {
             return None;
         }
-        Some(if let Some(offsets) = &self.offsets {
-            self.get_entry_inline(rev, offsets)
+        Some(if let Some(offsets) = &*self.get_offsets() {
+            self.get_entry_inline(rev, offsets.as_ref())
         } else {
             self.get_entry_separated(rev)
         })
@@ -393,7 +419,7 @@ impl Index {
     ) -> Result<(), RevlogError> {
         revision_data.validate()?;
         let new_offset = self.bytes.len();
-        if let Some(offsets) = self.offsets.as_mut() {
+        if let Some(offsets) = &mut *self.get_offsets_mut() {
             offsets.push(new_offset)
         }
         self.bytes.added.extend(revision_data.into_v1().as_bytes());
@@ -401,12 +427,37 @@ impl Index {
     }
 
     pub fn remove(&mut self, rev: Revision) -> Result<(), RevlogError> {
-        self.bytes.remove(rev, self.offsets.as_deref())?;
-        if let Some(offsets) = self.offsets.as_mut() {
+        let offsets = self.get_offsets().clone();
+        self.bytes.remove(rev, offsets.as_deref())?;
+        if let Some(offsets) = &mut *self.get_offsets_mut() {
             offsets.truncate(rev.0 as usize)
         }
         Ok(())
     }
+
+    pub fn clear_caches(&mut self) {
+        // We need to get the 'inline' value from Python at init and use this
+        // instead of offsets to determine whether we're inline since we might
+        // clear caches. This implies re-populating the offsets on-demand.
+        self.offsets = RwLock::new(None);
+    }
+}
+
+fn inline_scan(bytes: &[u8]) -> (usize, Vec<usize>) {
+    let mut offset: usize = 0;
+    let mut offsets = Vec::new();
+
+    while offset + INDEX_ENTRY_SIZE <= bytes.len() {
+        offsets.push(offset);
+        let end = offset + INDEX_ENTRY_SIZE;
+        let entry = IndexEntry {
+            bytes: &bytes[offset..end],
+            offset_override: None,
+        };
+
+        offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
+    }
+    (offset, offsets)
+}
 }
 
 impl super::RevlogIndex for Index {
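
Two notes on this last hg-core hunk. In `remove`, the offsets are cloned out of the read guard before `self.bytes.remove(...)` is called; holding the guard (an immutable borrow of `self`) across that `&mut self.bytes` call would not borrow-check. And `inline_scan` is needed because an inline revlog interleaves each fixed-size index entry with that revision's compressed data, so entry positions can only be recovered by walking the whole buffer. A toy version of that walk, assuming the revlog v1 layout where an entry is 64 bytes and bytes 8..12 hold the big-endian compressed length (the real code goes through `IndexEntry::compressed_len`):

// Toy stand-ins for INDEX_ENTRY_SIZE and IndexEntry::compressed_len().
const ENTRY_SIZE: usize = 64;

fn scan(bytes: &[u8]) -> (usize, Vec<usize>) {
    let mut offset = 0;
    let mut offsets = Vec::new();
    while offset + ENTRY_SIZE <= bytes.len() {
        offsets.push(offset);
        // Assumed layout: big-endian u32 compressed length at entry[8..12].
        let comp_len = u32::from_be_bytes(
            bytes[offset + 8..offset + 12].try_into().unwrap(),
        ) as usize;
        // Skip the fixed-size entry plus the inline data that follows it.
        offset += ENTRY_SIZE + comp_len;
    }
    (offset, offsets)
}

fn main() {
    // Entry 0 followed by 5 bytes of inline data, then entry 1 with no data.
    let mut buf = vec![0u8; ENTRY_SIZE];
    buf[8..12].copy_from_slice(&5u32.to_be_bytes());
    buf.extend(vec![0u8; 5 + ENTRY_SIZE]);
    assert_eq!(scan(&buf), (2 * ENTRY_SIZE + 5, vec![0, ENTRY_SIZE + 5]));
}

The final hunk below is in a different file: the rust-cpython bindings that expose this index to Python.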
@@ -205,6 +205,7 @@ py_class!(pub class MixedIndex |py| {
         self.nt(py).borrow_mut().take();
         self.docket(py).borrow_mut().take();
         self.nodemap_mmap(py).borrow_mut().take();
+        self.index(py).borrow_mut().clear_caches();
         self.call_cindex(py, "clearcaches", args, kw)
     }
 
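
With the binding wired up, a Python-level `clearcaches()` now also drops the Rust-side offsets, and the next access transparently re-runs `inline_scan`. A hypothetical test sketch of the invariant this is meant to preserve; `make_inline_index` is an assumed helper, not something the patch provides:

#[test]
fn clear_caches_recomputes_offsets() {
    // Assumed helper constructing an inline `Index` from test bytes.
    let mut index = make_inline_index();
    let len_before = index.len(); // populates the offsets lazily
    index.clear_caches();         // drops the cached offsets
    // len() must force inline_scan to run again and agree with before.
    assert_eq!(index.len(), len_before);
}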