rust-revlog: rename `start` to `data_start`...
Raphaël Gomès
r53193:e2319309 default
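
For context, a minimal sketch of the renamed accessor from a caller's point of view (the `revlog` and `rev` bindings below are assumed for illustration, not part of the change): a revision's data chunk now spans `data_start(rev)..end(rev)`, where `end` is `data_start` plus the compressed chunk length.

// Hypothetical caller, assuming `revlog: &InnerRevlog` and a valid `rev: Revision`.
let offset = revlog.data_start(rev); // previously `revlog.start(rev)`
let length = revlog.data_compressed_length(rev);
debug_assert_eq!(revlog.end(rev), offset + length);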
@@ -1,1351 +1,1351
1 1 //! A layer of lower-level revlog functionality to encapsulate most of the
2 2 //! IO work and expensive operations.
3 3 use std::{
4 4 borrow::Cow,
5 5 cell::RefCell,
6 6 io::{ErrorKind, Seek, SeekFrom, Write},
7 7 ops::Deref,
8 8 path::PathBuf,
9 9 sync::{Arc, Mutex},
10 10 };
11 11
12 12 use schnellru::{ByMemoryUsage, LruMap};
13 13 use sha1::{Digest, Sha1};
14 14
15 15 use crate::{
16 16 errors::{HgError, IoResultExt},
17 17 exit_codes,
18 18 transaction::Transaction,
19 19 vfs::Vfs,
20 20 };
21 21
22 22 use super::{
23 23 compression::{
24 24 uncompressed_zstd_data, CompressionConfig, Compressor, NoneCompressor,
25 25 ZlibCompressor, ZstdCompressor, ZLIB_BYTE, ZSTD_BYTE,
26 26 },
27 27 file_io::{DelayedBuffer, FileHandle, RandomAccessFile, WriteHandles},
28 28 index::{Index, IndexHeader, INDEX_ENTRY_SIZE},
29 29 node::{NODE_BYTES_LENGTH, NULL_NODE},
30 30 options::{RevlogDataConfig, RevlogDeltaConfig, RevlogFeatureConfig},
31 31 BaseRevision, Node, Revision, RevlogEntry, RevlogError, RevlogIndex,
32 32 UncheckedRevision, NULL_REVISION, NULL_REVLOG_ENTRY_FLAGS,
33 33 };
34 34
35 35 /// Matches the `_InnerRevlog` class in the Python code, as an arbitrary
36 36 /// boundary to incrementally rewrite higher-level revlog functionality in
37 37 /// Rust.
38 38 pub struct InnerRevlog {
39 39 /// When index and data are not interleaved: bytes of the revlog index.
40 40 /// When index and data are interleaved (inline revlog): bytes of the
41 41 /// revlog index and data.
42 42 pub index: Index,
43 43 /// The store vfs that is used to interact with the filesystem
44 44 vfs: Box<dyn Vfs>,
45 45 /// The index file path, relative to the vfs root
46 46 pub index_file: PathBuf,
47 47 /// The data file path, relative to the vfs root (same as `index_file`
48 48 /// if inline)
49 49 data_file: PathBuf,
50 50 /// Data config that applies to this revlog
51 51 data_config: RevlogDataConfig,
52 52 /// Delta config that applies to this revlog
53 53 delta_config: RevlogDeltaConfig,
54 54 /// Feature config that applies to this revlog
55 55 feature_config: RevlogFeatureConfig,
56 56 /// A view into this revlog's data file
57 57 segment_file: RandomAccessFile,
58 58 /// A cache of uncompressed chunks that have previously been restored.
59 59 /// Its eviction policy is defined in [`Self::new`].
60 60 uncompressed_chunk_cache: Option<UncompressedChunkCache>,
61 61 /// Used to keep track of the actual target during diverted writes
62 62 /// for the changelog
63 63 original_index_file: Option<PathBuf>,
64 64 /// Write handles to the index and data files
65 65 /// XXX why duplicate from `index` and `segment_file`?
66 66 writing_handles: Option<WriteHandles>,
67 67 /// See [`DelayedBuffer`].
68 68 delayed_buffer: Option<Arc<Mutex<DelayedBuffer>>>,
69 69 /// Whether this revlog is inline. XXX why duplicate from `index`?
70 70 pub inline: bool,
71 71 /// A cache of the last revision, which is usually accessed multiple
72 72 /// times.
73 73 pub last_revision_cache: Mutex<Option<SingleRevisionCache>>,
74 74 }
75 75
76 76 impl InnerRevlog {
77 77 pub fn new(
78 78 vfs: Box<dyn Vfs>,
79 79 index: Index,
80 80 index_file: PathBuf,
81 81 data_file: PathBuf,
82 82 data_config: RevlogDataConfig,
83 83 delta_config: RevlogDeltaConfig,
84 84 feature_config: RevlogFeatureConfig,
85 85 ) -> Self {
86 86 assert!(index_file.is_relative());
87 87 assert!(data_file.is_relative());
88 88 let segment_file = RandomAccessFile::new(
89 89 dyn_clone::clone_box(&*vfs),
90 90 if index.is_inline() {
91 91 index_file.to_owned()
92 92 } else {
93 93 data_file.to_owned()
94 94 },
95 95 );
96 96
97 97 let uncompressed_chunk_cache =
98 98 data_config.uncompressed_cache_factor.map(
99 99 // Arbitrary initial value
100 100 // TODO check if using a hasher specific to integers is useful
101 101 |_factor| RefCell::new(LruMap::with_memory_budget(65536)),
102 102 );
103 103
104 104 let inline = index.is_inline();
105 105 Self {
106 106 index,
107 107 vfs,
108 108 index_file,
109 109 data_file,
110 110 data_config,
111 111 delta_config,
112 112 feature_config,
113 113 segment_file,
114 114 uncompressed_chunk_cache,
115 115 original_index_file: None,
116 116 writing_handles: None,
117 117 delayed_buffer: None,
118 118 inline,
119 119 last_revision_cache: Mutex::new(None),
120 120 }
121 121 }
122 122
123 123 /// Return number of entries of the revlog index
124 124 pub fn len(&self) -> usize {
125 125 self.index.len()
126 126 }
127 127
128 128 /// Return `true` if this revlog has no entries
129 129 pub fn is_empty(&self) -> bool {
130 130 self.len() == 0
131 131 }
132 132
133 133 /// Return whether this revlog is inline (mixed index and data)
134 134 pub fn is_inline(&self) -> bool {
135 135 self.inline
136 136 }
137 137
138 138 /// Clear all caches from this revlog
139 139 pub fn clear_cache(&mut self) {
140 140 assert!(!self.is_delaying());
141 141 if let Some(cache) = self.uncompressed_chunk_cache.as_ref() {
142 142 // We don't release the allocation here because reusing it is probably faster.
143 143 // We could change our minds later if this ends up being a problem
144 144 // with regard to memory consumption.
145 145 cache.borrow_mut().clear();
146 146 }
147 147 }
148 148
149 149 /// Return an entry for the null revision
150 150 pub fn make_null_entry(&self) -> RevlogEntry {
151 151 RevlogEntry {
152 152 revlog: self,
153 153 rev: NULL_REVISION,
154 154 uncompressed_len: 0,
155 155 p1: NULL_REVISION,
156 156 p2: NULL_REVISION,
157 157 flags: NULL_REVLOG_ENTRY_FLAGS,
158 158 hash: NULL_NODE,
159 159 }
160 160 }
161 161
162 162 /// Return the [`RevlogEntry`] for a [`Revision`] that is known to exist
163 163 pub fn get_entry(
164 164 &self,
165 165 rev: Revision,
166 166 ) -> Result<RevlogEntry, RevlogError> {
167 167 if rev == NULL_REVISION {
168 168 return Ok(self.make_null_entry());
169 169 }
170 170 let index_entry = self
171 171 .index
172 172 .get_entry(rev)
173 173 .ok_or_else(|| RevlogError::InvalidRevision(rev.to_string()))?;
174 174 let p1 =
175 175 self.index.check_revision(index_entry.p1()).ok_or_else(|| {
176 176 RevlogError::corrupted(format!(
177 177 "p1 for rev {} is invalid",
178 178 rev
179 179 ))
180 180 })?;
181 181 let p2 =
182 182 self.index.check_revision(index_entry.p2()).ok_or_else(|| {
183 183 RevlogError::corrupted(format!(
184 184 "p2 for rev {} is invalid",
185 185 rev
186 186 ))
187 187 })?;
188 188 let entry = RevlogEntry {
189 189 revlog: self,
190 190 rev,
191 191 uncompressed_len: index_entry.uncompressed_len(),
192 192 p1,
193 193 p2,
194 194 flags: index_entry.flags(),
195 195 hash: *index_entry.hash(),
196 196 };
197 197 Ok(entry)
198 198 }
199 199
200 200 /// Return the [`RevlogEntry`] for `rev`. If `rev` fails to check, this
201 201 /// returns a [`RevlogError`].
202 202 pub fn get_entry_for_unchecked_rev(
203 203 &self,
204 204 rev: UncheckedRevision,
205 205 ) -> Result<RevlogEntry, RevlogError> {
206 206 if rev == NULL_REVISION.into() {
207 207 return Ok(self.make_null_entry());
208 208 }
209 209 let rev = self.index.check_revision(rev).ok_or_else(|| {
210 210 RevlogError::corrupted(format!("rev {} is invalid", rev))
211 211 })?;
212 212 self.get_entry(rev)
213 213 }
214 214
215 215 /// Is the revlog currently delaying the visibility of written data?
216 216 ///
217 217 /// The delaying mechanism can be either in-memory or written on disk in a
218 218 /// side-file.
219 219 pub fn is_delaying(&self) -> bool {
220 220 self.delayed_buffer.is_some() || self.original_index_file.is_some()
221 221 }
222 222
223 223 /// The offset of the data chunk for this revision
224 224 #[inline(always)]
225 pub fn start(&self, rev: Revision) -> usize {
225 pub fn data_start(&self, rev: Revision) -> usize {
226 226 self.index.start(
227 227 rev,
228 228 &self
229 229 .index
230 230 .get_entry(rev)
231 231 .unwrap_or_else(|| self.index.make_null_entry()),
232 232 )
233 233 }
234 234
235 235 /// The length of the data chunk for this revision
236 236 #[inline(always)]
237 237 pub fn data_compressed_length(&self, rev: Revision) -> usize {
238 238 self.index
239 239 .get_entry(rev)
240 240 .unwrap_or_else(|| self.index.make_null_entry())
241 241 .compressed_len() as usize
242 242 }
243 243
244 244 /// The end of the data chunk for this revision
245 245 #[inline(always)]
246 246 pub fn end(&self, rev: Revision) -> usize {
247 self.start(rev) + self.data_compressed_length(rev)
247 self.data_start(rev) + self.data_compressed_length(rev)
248 248 }
249 249
250 250 /// Return the delta parent of the given revision
251 251 pub fn delta_parent(&self, rev: Revision) -> Revision {
252 252 let base = self
253 253 .index
254 254 .get_entry(rev)
255 255 .unwrap()
256 256 .base_revision_or_base_of_delta_chain();
257 257 if base.0 == rev.0 {
258 258 NULL_REVISION
259 259 } else if self.delta_config.general_delta {
260 260 Revision(base.0)
261 261 } else {
262 262 Revision(rev.0 - 1)
263 263 }
264 264 }
265 265
266 266 /// Return whether `rev` points to a snapshot revision (i.e. does not have
267 267 /// a delta base).
268 268 pub fn is_snapshot(&self, rev: Revision) -> Result<bool, RevlogError> {
269 269 if !self.delta_config.sparse_revlog {
270 270 return Ok(self.delta_parent(rev) == NULL_REVISION);
271 271 }
272 272 self.index.is_snapshot_unchecked(rev)
273 273 }
274 274
275 275 /// Return the delta chain for `rev` according to this revlog's config.
276 276 /// See [`Index::delta_chain`] for more information.
277 277 pub fn delta_chain(
278 278 &self,
279 279 rev: Revision,
280 280 stop_rev: Option<Revision>,
281 281 ) -> Result<(Vec<Revision>, bool), HgError> {
282 282 self.index.delta_chain(
283 283 rev,
284 284 stop_rev,
285 285 self.delta_config.general_delta.into(),
286 286 )
287 287 }
288 288
289 289 fn compressor(&self) -> Result<Box<dyn Compressor>, HgError> {
290 290 // TODO cache the compressor?
291 291 Ok(match self.feature_config.compression_engine {
292 292 CompressionConfig::Zlib { level } => {
293 293 Box::new(ZlibCompressor::new(level))
294 294 }
295 295 CompressionConfig::Zstd { level, threads } => {
296 296 Box::new(ZstdCompressor::new(level, threads))
297 297 }
298 298 CompressionConfig::None => Box::new(NoneCompressor),
299 299 })
300 300 }
301 301
302 302 /// Generate a possibly-compressed representation of data.
303 303 /// Returns `None` if the data was not compressed.
304 304 pub fn compress<'data>(
305 305 &self,
306 306 data: &'data [u8],
307 307 ) -> Result<Option<Cow<'data, [u8]>>, RevlogError> {
308 308 if data.is_empty() {
309 309 return Ok(Some(data.into()));
310 310 }
311 311 let res = self.compressor()?.compress(data)?;
312 312 if let Some(compressed) = res {
313 313 // The revlog compressor added the header in the returned data.
314 314 return Ok(Some(compressed.into()));
315 315 }
316 316
317 317 if data[0] == b'\0' {
318 318 return Ok(Some(data.into()));
319 319 }
320 320 Ok(None)
321 321 }
322 322
323 323 /// Decompress a revlog chunk.
324 324 ///
325 325 /// The chunk is expected to begin with a header identifying the
326 326 /// format type so it can be routed to an appropriate decompressor.
327 327 pub fn decompress<'a>(
328 328 &'a self,
329 329 data: &'a [u8],
330 330 ) -> Result<Cow<[u8]>, RevlogError> {
331 331 if data.is_empty() {
332 332 return Ok(data.into());
333 333 }
334 334
335 335 // Revlogs are read much more frequently than they are written and many
336 336 // chunks only take microseconds to decompress, so performance is
337 337 // important here.
338 338
339 339 let header = data[0];
340 340 match header {
341 341 // Settings don't matter as they only affect compression
342 342 ZLIB_BYTE => Ok(ZlibCompressor::new(0).decompress(data)?.into()),
343 343 // Settings don't matter as they only affect compression
344 344 ZSTD_BYTE => {
345 345 Ok(ZstdCompressor::new(0, 0).decompress(data)?.into())
346 346 }
347 347 b'\0' => Ok(data.into()),
348 348 b'u' => Ok((&data[1..]).into()),
349 349 other => Err(HgError::UnsupportedFeature(format!(
350 350 "unknown compression header '{}'",
351 351 other
352 352 ))
353 353 .into()),
354 354 }
355 355 }
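
    /// Editor's illustrative sketch, not part of the original change: the
    /// first byte of a stored chunk routes decompression (`ZLIB_BYTE`,
    /// `ZSTD_BYTE`, `b'\0'` for data stored as-is, `b'u'` for an explicit
    /// literal). The `b"uhello"` input below is assumed for the example.
    #[allow(dead_code)]
    fn example_decompress_literal_chunk(&self) -> Result<(), RevlogError> {
        // A `b'u'` header means "stored uncompressed": only the marker byte
        // is stripped before the data is returned.
        let stored = b"uhello";
        assert_eq!(&*self.decompress(stored)?, b"hello");
        Ok(())
    }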
356 356
357 357 /// Obtain a segment of raw data corresponding to a range of revisions.
358 358 ///
359 359 /// Requests for data may be satisfied by a cache.
360 360 ///
361 361 /// Returns a 2-tuple of (offset, data) for the requested range of
362 362 /// revisions. Offset is the integer offset from the beginning of the
363 363 /// revlog and data is a slice of the raw byte data.
364 364 ///
365 365 /// Callers will need to call `self.data_start(rev)` and
366 366 /// `self.data_compressed_length(rev)` to find each revision's bounds.
367 367 pub fn get_segment_for_revs(
368 368 &self,
369 369 start_rev: Revision,
370 370 end_rev: Revision,
371 371 ) -> Result<(usize, Vec<u8>), HgError> {
372 372 let start = if start_rev == NULL_REVISION {
373 373 0
374 374 } else {
375 375 let start_entry = self
376 376 .index
377 377 .get_entry(start_rev)
378 378 .expect("null revision segment");
379 379 self.index.start(start_rev, &start_entry)
380 380 };
381 381 let end_entry = self
382 382 .index
383 383 .get_entry(end_rev)
384 384 .expect("null revision segment");
385 385 let end = self.index.start(end_rev, &end_entry)
386 386 + self.data_compressed_length(end_rev);
387 387
388 388 let length = end - start;
389 389
390 390 // XXX should we use mmap instead of doing this for platforms that
391 391 // support madvise/populate?
392 392 Ok((start, self.segment_file.read_chunk(start, length)?))
393 393 }
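
    /// Editor's illustrative sketch, not part of the original change: how a
    /// caller carves one revision's raw chunk out of a segment returned by
    /// `get_segment_for_revs`, mirroring the arithmetic used in `chunks`
    /// further down.
    #[allow(dead_code)]
    fn example_chunk_from_segment(
        &self,
        rev: Revision,
    ) -> Result<Vec<u8>, HgError> {
        let (offset, data) = self.get_segment_for_revs(rev, rev)?;
        // `data_start` is an offset from the beginning of the revlog, so the
        // segment's own offset has to be subtracted.
        let start = self.data_start(rev) - offset;
        let length = self.data_compressed_length(rev);
        Ok(data[start..start + length].to_vec())
    }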
394 394
395 395 /// Return the uncompressed raw data for `rev`
396 396 pub fn chunk_for_rev(&self, rev: Revision) -> Result<Arc<[u8]>, HgError> {
397 397 if let Some(cache) = self.uncompressed_chunk_cache.as_ref() {
398 398 if let Some(chunk) = cache.borrow_mut().get(&rev) {
399 399 return Ok(chunk.clone());
400 400 }
401 401 }
402 402 // TODO revlogv2 should check the compression mode
403 403 let data = self.get_segment_for_revs(rev, rev)?.1;
404 404 let uncompressed = self.decompress(&data).map_err(|e| {
405 405 HgError::abort(
406 406 format!("revlog decompression error: {}", e),
407 407 exit_codes::ABORT,
408 408 None,
409 409 )
410 410 })?;
411 411 let uncompressed: Arc<[u8]> = Arc::from(uncompressed.into_owned());
412 412 if let Some(cache) = self.uncompressed_chunk_cache.as_ref() {
413 413 cache.borrow_mut().insert(rev, uncompressed.clone());
414 414 }
415 415 Ok(uncompressed)
416 416 }
417 417
418 418 /// Execute `func` within a read context for the data file, meaning that
419 419 /// the read handle will be taken and discarded after the operation.
420 420 pub fn with_read<R>(
421 421 &self,
422 422 func: impl FnOnce() -> Result<R, RevlogError>,
423 423 ) -> Result<R, RevlogError> {
424 424 self.enter_reading_context()?;
425 425 let res = func();
426 426 self.exit_reading_context();
427 427 res.map_err(Into::into)
428 428 }
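
    /// Editor's illustrative sketch, not part of the original change: a
    /// typical read goes through `with_read` so the data-file handle is
    /// acquired once and released when the closure returns.
    #[allow(dead_code)]
    fn example_read_one_revision(
        &self,
        rev: Revision,
    ) -> Result<Arc<[u8]>, RevlogError> {
        self.with_read(|| Ok(self.chunk_for_rev(rev)?))
    }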
429 429
430 430 /// `pub` only for use in hg-cpython
431 431 #[doc(hidden)]
432 432 pub fn enter_reading_context(&self) -> Result<(), HgError> {
433 433 if self.is_empty() {
434 434 // Nothing to be read
435 435 return Ok(());
436 436 }
437 437 if self.delayed_buffer.is_some() && self.is_inline() {
438 438 return Err(HgError::abort(
439 439 "revlog with delayed write should not be inline",
440 440 exit_codes::ABORT,
441 441 None,
442 442 ));
443 443 }
444 444 self.segment_file.get_read_handle()?;
445 445 Ok(())
446 446 }
447 447
448 448 /// `pub` only for use in hg-cpython
449 449 #[doc(hidden)]
450 450 pub fn exit_reading_context(&self) {
451 451 self.segment_file.exit_reading_context()
452 452 }
453 453
454 454 /// Fill the buffer returned by `get_buffer` with the possibly un-validated
455 455 /// raw text for a revision. It can be already validated if it comes
456 456 /// from the cache.
457 457 pub fn raw_text<G, T>(
458 458 &self,
459 459 rev: Revision,
460 460 get_buffer: G,
461 461 ) -> Result<(), RevlogError>
462 462 where
463 463 G: FnOnce(
464 464 usize,
465 465 &mut dyn FnMut(
466 466 &mut dyn RevisionBuffer<Target = T>,
467 467 ) -> Result<(), RevlogError>,
468 468 ) -> Result<(), RevlogError>,
469 469 {
470 470 let entry = &self.get_entry(rev)?;
471 471 let raw_size = entry.uncompressed_len();
472 472 let mut mutex_guard = self
473 473 .last_revision_cache
474 474 .lock()
475 475 .expect("lock should not be held");
476 476 let cached_rev = if let Some((_node, rev, data)) = &*mutex_guard {
477 477 Some((*rev, data.deref().as_ref()))
478 478 } else {
479 479 None
480 480 };
481 481 if let Some(cache) = &self.uncompressed_chunk_cache {
482 482 let cache = &mut cache.borrow_mut();
483 483 if let Some(size) = raw_size {
484 484 // Dynamically update the uncompressed_chunk_cache size to the
485 485 // largest revision we've seen in this revlog.
486 486 // Do it *before* restoration in case the current revision
487 487 // is the largest.
488 488 let factor = self
489 489 .data_config
490 490 .uncompressed_cache_factor
491 491 .expect("cache should not exist without factor");
492 492 let candidate_size = (size as f64 * factor) as usize;
493 493 let limiter_mut = cache.limiter_mut();
494 494 if candidate_size > limiter_mut.max_memory_usage() {
495 495 std::mem::swap(
496 496 limiter_mut,
497 497 &mut ByMemoryUsage::new(candidate_size),
498 498 );
499 499 }
500 500 }
501 501 }
502 502 entry.rawdata(cached_rev, get_buffer)?;
503 503 // drop cache to save memory, the caller is expected to update
504 504 // the revision cache after validating the text
505 505 mutex_guard.take();
506 506 Ok(())
507 507 }
508 508
509 509 /// Only `pub` for `hg-cpython`.
510 510 /// Obtain decompressed raw data for the specified revisions that are
511 511 /// assumed to be in ascending order.
512 512 ///
513 513 /// Returns a list with decompressed data for each requested revision.
514 514 #[doc(hidden)]
515 515 pub fn chunks(
516 516 &self,
517 517 revs: Vec<Revision>,
518 518 target_size: Option<u64>,
519 519 ) -> Result<Vec<Arc<[u8]>>, RevlogError> {
520 520 if revs.is_empty() {
521 521 return Ok(vec![]);
522 522 }
523 523 let mut fetched_revs = vec![];
524 524 let mut chunks = Vec::with_capacity(revs.len());
525 525
526 526 match self.uncompressed_chunk_cache.as_ref() {
527 527 Some(cache) => {
528 528 let mut cache = cache.borrow_mut();
529 529 for rev in revs.iter() {
530 530 match cache.get(rev) {
531 531 Some(hit) => chunks.push((*rev, hit.to_owned())),
532 532 None => fetched_revs.push(*rev),
533 533 }
534 534 }
535 535 }
536 536 None => fetched_revs = revs,
537 537 }
538 538
539 539 let already_cached = chunks.len();
540 540
541 541 let sliced_chunks = if fetched_revs.is_empty() {
542 542 vec![]
543 543 } else if !self.data_config.with_sparse_read || self.is_inline() {
544 544 vec![fetched_revs]
545 545 } else {
546 546 self.slice_chunk(&fetched_revs, target_size)?
547 547 };
548 548
549 549 self.with_read(|| {
550 550 for revs_chunk in sliced_chunks {
551 551 let first_rev = revs_chunk[0];
552 552 // Skip trailing revisions with empty diff
553 553 let last_rev_idx = revs_chunk
554 554 .iter()
555 555 .rposition(|r| self.data_compressed_length(*r) != 0)
556 556 .unwrap_or(revs_chunk.len() - 1);
557 557
558 558 let last_rev = revs_chunk[last_rev_idx];
559 559
560 560 let (offset, data) =
561 561 self.get_segment_for_revs(first_rev, last_rev)?;
562 562
563 563 let revs_chunk = &revs_chunk[..=last_rev_idx];
564 564
565 565 for rev in revs_chunk {
566 let chunk_start = self.start(*rev);
566 let chunk_start = self.data_start(*rev);
567 567 let chunk_length = self.data_compressed_length(*rev);
568 568 // TODO revlogv2 should check the compression mode
569 569 let bytes = &data[chunk_start - offset..][..chunk_length];
570 570 let chunk = if !bytes.is_empty() && bytes[0] == ZSTD_BYTE {
571 571 // If we're using `zstd`, we want to try a more
572 572 // specialized decompression
573 573 let entry = self.index.get_entry(*rev).unwrap();
574 574 let is_delta = entry
575 575 .base_revision_or_base_of_delta_chain()
576 576 != (*rev).into();
577 577 let uncompressed = uncompressed_zstd_data(
578 578 bytes,
579 579 is_delta,
580 580 entry.uncompressed_len(),
581 581 )?;
582 582 Cow::Owned(uncompressed)
583 583 } else {
584 584 // Otherwise just fallback to generic decompression.
585 585 self.decompress(bytes)?
586 586 };
587 587
588 588 chunks.push((*rev, chunk.into()));
589 589 }
590 590 }
591 591 Ok(())
592 592 })?;
593 593
594 594 if let Some(cache) = self.uncompressed_chunk_cache.as_ref() {
595 595 let mut cache = cache.borrow_mut();
596 596 for (rev, chunk) in chunks.iter().skip(already_cached) {
597 597 cache.insert(*rev, chunk.clone());
598 598 }
599 599 }
600 600 // Use stable sort here since it's *mostly* sorted
601 601 chunks.sort_by(|a, b| a.0.cmp(&b.0));
602 602 Ok(chunks.into_iter().map(|(_r, chunk)| chunk).collect())
603 603 }
604 604
605 605 /// Slice revs to reduce the amount of unrelated data to be read from disk.
606 606 ///
607 607 /// ``revs`` is sliced into groups that should be read in one go.
608 608 /// Assume that revs are sorted.
609 609 ///
610 610 /// The initial chunk is sliced until the overall density
611 611 /// (payload/chunks-span ratio) is above
612 612 /// `revlog.data_config.sr_density_threshold`.
613 613 /// No gap smaller than `revlog.data_config.sr_min_gap_size` is skipped.
614 614 ///
615 615 /// If `target_size` is set, no chunk larger than `target_size`
616 616 /// will be returned.
617 617 /// For consistency with other slicing choices, this limit won't go lower
618 618 /// than `revlog.data_config.sr_min_gap_size`.
619 619 ///
620 620 /// If individual revision chunks are larger than this limit, they will
621 621 /// still be returned individually.
622 622 pub fn slice_chunk(
623 623 &self,
624 624 revs: &[Revision],
625 625 target_size: Option<u64>,
626 626 ) -> Result<Vec<Vec<Revision>>, RevlogError> {
627 627 let target_size =
628 628 target_size.map(|size| size.max(self.data_config.sr_min_gap_size));
629 629
630 630 let target_density = self.data_config.sr_density_threshold;
631 631 let min_gap_size = self.data_config.sr_min_gap_size as usize;
632 632 let to_density = self.index.slice_chunk_to_density(
633 633 revs,
634 634 target_density,
635 635 min_gap_size,
636 636 );
637 637
638 638 let mut sliced = vec![];
639 639
640 640 for chunk in to_density {
641 641 sliced.extend(
642 642 self.slice_chunk_to_size(&chunk, target_size)?
643 643 .into_iter()
644 644 .map(ToOwned::to_owned),
645 645 );
646 646 }
647 647
648 648 Ok(sliced)
649 649 }
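
    /// Editor's illustrative sketch, not part of the original change: with
    /// sparse reads, a caller fetches each density-sliced group as one
    /// contiguous segment instead of issuing one read per revision.
    #[allow(dead_code)]
    fn example_grouped_reads(
        &self,
        revs: &[Revision],
    ) -> Result<(), RevlogError> {
        for group in self.slice_chunk(revs, None)? {
            let (first, last) = (group[0], group[group.len() - 1]);
            // One contiguous read covers every revision in the group.
            let _segment = self.get_segment_for_revs(first, last)?;
        }
        Ok(())
    }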
650 650
651 651 /// Slice revs to match the target size
652 652 ///
653 653 /// This is intended to be used on chunks that density slicing selected,
654 654 /// but that are still too large compared to the read guarantee of revlogs.
655 655 /// This might happen when the "minimal gap size" interrupted the slicing
656 656 /// or when chains are built in a way that create large blocks next to
657 657 /// each other.
658 658 fn slice_chunk_to_size<'a>(
659 659 &self,
660 660 revs: &'a [Revision],
661 661 target_size: Option<u64>,
662 662 ) -> Result<Vec<&'a [Revision]>, RevlogError> {
663 let mut start_data = self.start(revs[0]);
663 let mut start_data = self.data_start(revs[0]);
664 664 let end_data = self.end(revs[revs.len() - 1]);
665 665 let full_span = end_data - start_data;
666 666
667 667 let nothing_to_do = target_size
668 668 .map(|size| full_span <= size as usize)
669 669 .unwrap_or(true);
670 670
671 671 if nothing_to_do {
672 672 return Ok(vec![revs]);
673 673 }
674 674 let target_size = target_size.expect("target_size is set") as usize;
675 675
676 676 let mut start_rev_idx = 0;
677 677 let mut end_rev_idx = 1;
678 678 let mut chunks = vec![];
679 679
680 680 for (idx, rev) in revs.iter().enumerate().skip(1) {
681 681 let span = self.end(*rev) - start_data;
682 682 let is_snapshot = self.is_snapshot(*rev)?;
683 683 if span <= target_size && is_snapshot {
684 684 end_rev_idx = idx + 1;
685 685 } else {
686 686 let chunk =
687 687 self.trim_chunk(revs, start_rev_idx, Some(end_rev_idx));
688 688 if !chunk.is_empty() {
689 689 chunks.push(chunk);
690 690 }
691 691 start_rev_idx = idx;
692 start_data = self.start(*rev);
692 start_data = self.data_start(*rev);
693 693 end_rev_idx = idx + 1;
694 694 }
695 695 if !is_snapshot {
696 696 break;
697 697 }
698 698 }
699 699
700 700 // For the others, we use binary slicing to quickly converge towards
701 701 // valid chunks (otherwise, we might end up looking for the start/end
702 702 // of many revisions). This logic is not looking for the perfect
703 703 // slicing point, it quickly converges towards valid chunks.
704 704 let number_of_items = revs.len();
705 705
706 706 while (end_data - start_data) > target_size {
707 707 end_rev_idx = number_of_items;
708 708 if number_of_items - start_rev_idx <= 1 {
709 709 // Protect against individual chunks larger than the limit
710 710 break;
711 711 }
712 712 let mut local_end_data = self.end(revs[end_rev_idx - 1]);
713 713 let mut span = local_end_data - start_data;
714 714 while span > target_size {
715 715 if end_rev_idx - start_rev_idx <= 1 {
716 716 // Protect against individual chunks larger than the limit
717 717 break;
718 718 }
719 719 end_rev_idx -= (end_rev_idx - start_rev_idx) / 2;
720 720 local_end_data = self.end(revs[end_rev_idx - 1]);
721 721 span = local_end_data - start_data;
722 722 }
723 723 let chunk =
724 724 self.trim_chunk(revs, start_rev_idx, Some(end_rev_idx));
725 725 if !chunk.is_empty() {
726 726 chunks.push(chunk);
727 727 }
728 728 start_rev_idx = end_rev_idx;
729 start_data = self.start(revs[start_rev_idx]);
729 start_data = self.data_start(revs[start_rev_idx]);
730 730 }
731 731
732 732 let chunk = self.trim_chunk(revs, start_rev_idx, None);
733 733 if !chunk.is_empty() {
734 734 chunks.push(chunk);
735 735 }
736 736
737 737 Ok(chunks)
738 738 }
739 739
740 740 /// Returns `revs[start_rev_idx..end_rev_idx]` without empty trailing revs
741 741 fn trim_chunk<'a>(
742 742 &self,
743 743 revs: &'a [Revision],
744 744 start_rev_idx: usize,
745 745 end_rev_idx: Option<usize>,
746 746 ) -> &'a [Revision] {
747 747 let mut end_rev_idx = end_rev_idx.unwrap_or(revs.len());
748 748
749 749 // If we have a non-empty delta candidate, there is nothing to trim
750 750 if revs[end_rev_idx - 1].0 < self.len() as BaseRevision {
751 751 // Trim empty revs at the end, except the very first rev of a chain
752 752 while end_rev_idx > 1
753 753 && end_rev_idx > start_rev_idx
754 754 && self.data_compressed_length(revs[end_rev_idx - 1]) == 0
755 755 {
756 756 end_rev_idx -= 1
757 757 }
758 758 }
759 759
760 760 &revs[start_rev_idx..end_rev_idx]
761 761 }
762 762
763 763 /// Check the hash of some given data against the recorded hash.
764 764 pub fn check_hash(
765 765 &self,
766 766 p1: Revision,
767 767 p2: Revision,
768 768 expected: &[u8],
769 769 data: &[u8],
770 770 ) -> bool {
771 771 let e1 = self.index.get_entry(p1);
772 772 let h1 = match e1 {
773 773 Some(ref entry) => entry.hash(),
774 774 None => &NULL_NODE,
775 775 };
776 776 let e2 = self.index.get_entry(p2);
777 777 let h2 = match e2 {
778 778 Some(ref entry) => entry.hash(),
779 779 None => &NULL_NODE,
780 780 };
781 781
782 782 hash(data, h1.as_bytes(), h2.as_bytes()) == expected
783 783 }
784 784
785 785 /// Returns whether we are currently in a [`Self::with_write`] context
786 786 pub fn is_writing(&self) -> bool {
787 787 self.writing_handles.is_some()
788 788 }
789 789
790 790 /// Open the revlog files for writing
791 791 ///
792 792 /// Adding content to a revlog should be done within this context.
793 793 /// TODO try using `BufRead` and `BufWrite` and see if performance improves
794 794 pub fn with_write<R>(
795 795 &mut self,
796 796 transaction: &mut impl Transaction,
797 797 data_end: Option<usize>,
798 798 func: impl FnOnce() -> R,
799 799 ) -> Result<R, HgError> {
800 800 if self.is_writing() {
801 801 return Ok(func());
802 802 }
803 803 self.enter_writing_context(data_end, transaction)
804 804 .inspect_err(|_| {
805 805 self.exit_writing_context();
806 806 })?;
807 807 let res = func();
808 808 self.exit_writing_context();
809 809 Ok(res)
810 810 }
811 811
812 812 /// `pub` only for use in hg-cpython
813 813 #[doc(hidden)]
814 814 pub fn exit_writing_context(&mut self) {
815 815 self.writing_handles.take();
816 816 self.segment_file.writing_handle.take();
817 817 self.segment_file.reading_handle.take();
818 818 }
819 819
820 820 /// `pub` only for use in hg-cpython
821 821 #[doc(hidden)]
822 822 pub fn python_writing_handles(&self) -> Option<&WriteHandles> {
823 823 self.writing_handles.as_ref()
824 824 }
825 825
826 826 /// `pub` only for use in hg-cpython
827 827 #[doc(hidden)]
828 828 pub fn enter_writing_context(
829 829 &mut self,
830 830 data_end: Option<usize>,
831 831 transaction: &mut impl Transaction,
832 832 ) -> Result<(), HgError> {
833 833 let data_size = if self.is_empty() {
834 834 0
835 835 } else {
836 836 self.end(Revision((self.len() - 1) as BaseRevision))
837 837 };
838 838 let data_handle = if !self.is_inline() {
839 839 let data_handle = match self.vfs.open_write(&self.data_file) {
840 840 Ok(mut f) => {
841 841 if let Some(end) = data_end {
842 842 f.seek(SeekFrom::Start(end as u64))
843 843 .when_reading_file(&self.data_file)?;
844 844 } else {
845 845 f.seek(SeekFrom::End(0))
846 846 .when_reading_file(&self.data_file)?;
847 847 }
848 848 f
849 849 }
850 850 Err(e) => match e {
851 851 HgError::IoError { error, context } => {
852 852 if error.kind() != ErrorKind::NotFound {
853 853 return Err(HgError::IoError { error, context });
854 854 }
855 855 self.vfs.create(&self.data_file, true)?
856 856 }
857 857 e => return Err(e),
858 858 },
859 859 };
860 860 transaction.add(&self.data_file, data_size);
861 861 Some(FileHandle::from_file(
862 862 data_handle,
863 863 dyn_clone::clone_box(&*self.vfs),
864 864 &self.data_file,
865 865 ))
866 866 } else {
867 867 None
868 868 };
869 869 let index_size = self.len() * INDEX_ENTRY_SIZE;
870 870 let index_handle = self.index_write_handle()?;
871 871 if self.is_inline() {
872 872 transaction.add(&self.index_file, data_size);
873 873 } else {
874 874 transaction.add(&self.index_file, index_size);
875 875 }
876 876 self.writing_handles = Some(WriteHandles {
877 877 index_handle: index_handle.clone(),
878 878 data_handle: data_handle.clone(),
879 879 });
880 880 *self.segment_file.reading_handle.borrow_mut() = if self.is_inline() {
881 881 Some(index_handle)
882 882 } else {
883 883 data_handle
884 884 };
885 885 Ok(())
886 886 }
887 887
888 888 /// Get a write handle to the index, sought to the end of its data.
889 889 fn index_write_handle(&self) -> Result<FileHandle, HgError> {
890 890 let res = if self.delayed_buffer.is_none() {
891 891 if self.data_config.check_ambig {
892 892 self.vfs.open_check_ambig(&self.index_file)
893 893 } else {
894 894 self.vfs.open_write(&self.index_file)
895 895 }
896 896 } else {
897 897 self.vfs.open_write(&self.index_file)
898 898 };
899 899 match res {
900 900 Ok(mut handle) => {
901 901 handle
902 902 .seek(SeekFrom::End(0))
903 903 .when_reading_file(&self.index_file)?;
904 904 Ok(
905 905 if let Some(delayed_buffer) = self.delayed_buffer.as_ref()
906 906 {
907 907 FileHandle::from_file_delayed(
908 908 handle,
909 909 dyn_clone::clone_box(&*self.vfs),
910 910 &self.index_file,
911 911 delayed_buffer.clone(),
912 912 )?
913 913 } else {
914 914 FileHandle::from_file(
915 915 handle,
916 916 dyn_clone::clone_box(&*self.vfs),
917 917 &self.index_file,
918 918 )
919 919 },
920 920 )
921 921 }
922 922 Err(e) => match e {
923 923 HgError::IoError { error, context } => {
924 924 if error.kind() != ErrorKind::NotFound {
925 925 return Err(HgError::IoError { error, context });
926 926 };
927 927 if let Some(delayed_buffer) = self.delayed_buffer.as_ref()
928 928 {
929 929 FileHandle::new_delayed(
930 930 dyn_clone::clone_box(&*self.vfs),
931 931 &self.index_file,
932 932 true,
933 933 delayed_buffer.clone(),
934 934 )
935 935 } else {
936 936 FileHandle::new(
937 937 dyn_clone::clone_box(&*self.vfs),
938 938 &self.index_file,
939 939 true,
940 940 true,
941 941 )
942 942 }
943 943 }
944 944 e => Err(e),
945 945 },
946 946 }
947 947 }
948 948
949 949 /// Split the data of an inline revlog into an index and a data file
950 950 pub fn split_inline(
951 951 &mut self,
952 952 header: IndexHeader,
953 953 new_index_file_path: Option<PathBuf>,
954 954 ) -> Result<PathBuf, RevlogError> {
955 955 assert!(self.delayed_buffer.is_none());
956 956 let existing_handles = self.writing_handles.is_some();
957 957 if let Some(handles) = &mut self.writing_handles {
958 958 handles.index_handle.flush()?;
959 959 self.writing_handles.take();
960 960 self.segment_file.writing_handle.take();
961 961 }
962 962 let mut new_data_file_handle =
963 963 self.vfs.create(&self.data_file, true)?;
964 964 // Drop any potential data, possibly redundant with the VFS impl.
965 965 new_data_file_handle
966 966 .set_len(0)
967 967 .when_writing_file(&self.data_file)?;
968 968
969 969 self.with_read(|| -> Result<(), RevlogError> {
970 970 for r in 0..self.index.len() {
971 971 let rev = Revision(r as BaseRevision);
972 972 let rev_segment = self.get_segment_for_revs(rev, rev)?.1;
973 973 new_data_file_handle
974 974 .write_all(&rev_segment)
975 975 .when_writing_file(&self.data_file)?;
976 976 }
977 977 new_data_file_handle
978 978 .flush()
979 979 .when_writing_file(&self.data_file)?;
980 980 Ok(())
981 981 })?;
982 982
983 983 if let Some(index_path) = new_index_file_path {
984 984 self.index_file = index_path
985 985 }
986 986
987 987 let mut new_index_handle = self.vfs.create(&self.index_file, true)?;
988 988 let mut new_data = Vec::with_capacity(self.len() * INDEX_ENTRY_SIZE);
989 989 for r in 0..self.len() {
990 990 let rev = Revision(r as BaseRevision);
991 991 let entry = self.index.entry_binary(rev).unwrap_or_else(|| {
992 992 panic!(
993 993 "entry {} should exist in {}",
994 994 r,
995 995 self.index_file.display()
996 996 )
997 997 });
998 998 if r == 0 {
999 999 new_data.extend(header.header_bytes);
1000 1000 }
1001 1001 new_data.extend(entry);
1002 1002 }
1003 1003 new_index_handle
1004 1004 .write_all(&new_data)
1005 1005 .when_writing_file(&self.index_file)?;
1006 1006 // Replace the index with a new one because the buffer contains inline
1007 1007 // data
1008 1008 self.index = Index::new(Box::new(new_data), header)?;
1009 1009 self.inline = false;
1010 1010
1011 1011 self.segment_file = RandomAccessFile::new(
1012 1012 dyn_clone::clone_box(&*self.vfs),
1013 1013 self.data_file.to_owned(),
1014 1014 );
1015 1015 if existing_handles {
1016 1016 // Switched from inline to conventional, reopen the index
1017 1017 let new_data_handle = Some(FileHandle::from_file(
1018 1018 new_data_file_handle,
1019 1019 dyn_clone::clone_box(&*self.vfs),
1020 1020 &self.data_file,
1021 1021 ));
1022 1022 self.writing_handles = Some(WriteHandles {
1023 1023 index_handle: self.index_write_handle()?,
1024 1024 data_handle: new_data_handle.clone(),
1025 1025 });
1026 1026 *self.segment_file.writing_handle.borrow_mut() = new_data_handle;
1027 1027 }
1028 1028
1029 1029 Ok(self.index_file.to_owned())
1030 1030 }
1031 1031
1032 1032 /// Write a new entry to this revlog.
1033 1033 /// - `entry` is the index bytes
1034 1034 /// - `header_and_data` is the compression header and the revision data
1035 1035 /// - `offset` is the position in the data file to write to
1036 1036 /// - `index_end` is the overwritten position in the index in revlog-v2,
1037 1037 /// since the format may allow a rewrite of garbage data at the end.
1038 1038 /// - `data_end` is the overwritten position in the data-file in revlog-v2,
1039 1039 /// since the format may allow a rewrite of garbage data at the end.
1040 1040 ///
1041 1041 /// XXX Why do we have `data_end` *and* `offset`? Same question in Python
1042 1042 pub fn write_entry(
1043 1043 &mut self,
1044 1044 mut transaction: impl Transaction,
1045 1045 entry: &[u8],
1046 1046 header_and_data: (&[u8], &[u8]),
1047 1047 mut offset: usize,
1048 1048 index_end: Option<u64>,
1049 1049 data_end: Option<u64>,
1050 1050 ) -> Result<(u64, Option<u64>), HgError> {
1051 1051 let current_revision = self.len() - 1;
1052 1052 let canonical_index_file = self.canonical_index_file();
1053 1053
1054 1054 let is_inline = self.is_inline();
1055 1055 let handles = match &mut self.writing_handles {
1056 1056 None => {
1057 1057 return Err(HgError::abort(
1058 1058 "adding revision outside of the `with_write` context",
1059 1059 exit_codes::ABORT,
1060 1060 None,
1061 1061 ));
1062 1062 }
1063 1063 Some(handles) => handles,
1064 1064 };
1065 1065 let index_handle = &mut handles.index_handle;
1066 1066 let data_handle = &mut handles.data_handle;
1067 1067 if let Some(end) = index_end {
1068 1068 index_handle
1069 1069 .seek(SeekFrom::Start(end))
1070 1070 .when_reading_file(&self.index_file)?;
1071 1071 } else {
1072 1072 index_handle
1073 1073 .seek(SeekFrom::End(0))
1074 1074 .when_reading_file(&self.index_file)?;
1075 1075 }
1076 1076 if let Some(data_handle) = data_handle {
1077 1077 if let Some(end) = data_end {
1078 1078 data_handle
1079 1079 .seek(SeekFrom::Start(end))
1080 1080 .when_reading_file(&self.data_file)?;
1081 1081 } else {
1082 1082 data_handle
1083 1083 .seek(SeekFrom::End(0))
1084 1084 .when_reading_file(&self.data_file)?;
1085 1085 }
1086 1086 }
1087 1087 let (header, data) = header_and_data;
1088 1088
1089 1089 if !is_inline {
1090 1090 transaction.add(&self.data_file, offset);
1091 1091 transaction
1092 1092 .add(&canonical_index_file, current_revision * entry.len());
1093 1093 let data_handle = data_handle
1094 1094 .as_mut()
1095 1095 .expect("data handle should exist when not inline");
1096 1096 if !header.is_empty() {
1097 1097 data_handle.write_all(header)?;
1098 1098 }
1099 1099 data_handle.write_all(data)?;
1100 1100 match &mut self.delayed_buffer {
1101 1101 Some(buf) => {
1102 1102 buf.lock()
1103 1103 .expect("propagate the panic")
1104 1104 .buffer
1105 1105 .write_all(entry)
1106 1106 .expect("write to delay buffer should succeed");
1107 1107 }
1108 1108 None => index_handle.write_all(entry)?,
1109 1109 }
1110 1110 } else if self.delayed_buffer.is_some() {
1111 1111 return Err(HgError::abort(
1112 1112 "invalid delayed write on inline revlog",
1113 1113 exit_codes::ABORT,
1114 1114 None,
1115 1115 ));
1116 1116 } else {
1117 1117 offset += current_revision * entry.len();
1118 1118 transaction.add(&canonical_index_file, offset);
1119 1119 index_handle.write_all(entry)?;
1120 1120 index_handle.write_all(header)?;
1121 1121 index_handle.write_all(data)?;
1122 1122 }
1123 1123 let data_position = match data_handle {
1124 1124 Some(h) => Some(h.position()?),
1125 1125 None => None,
1126 1126 };
1127 1127 Ok((index_handle.position()?, data_position))
1128 1128 }
1129 1129
1130 1130 /// Return the real target index file and not the temporary when diverting
1131 1131 pub fn canonical_index_file(&self) -> PathBuf {
1132 1132 self.original_index_file
1133 1133 .as_ref()
1134 1134 .map(ToOwned::to_owned)
1135 1135 .unwrap_or_else(|| self.index_file.to_owned())
1136 1136 }
1137 1137
1138 1138 /// Return the path to the diverted index
1139 1139 fn diverted_index(&self) -> PathBuf {
1140 1140 self.index_file.with_extension("i.a")
1141 1141 }
1142 1142
1143 1143 /// True if we're in a [`Self::with_write`] or [`Self::with_read`] context
1144 1144 pub fn is_open(&self) -> bool {
1145 1145 self.segment_file.is_open()
1146 1146 }
1147 1147
1148 1148 /// Set this revlog to delay its writes to a buffer
1149 1149 pub fn delay(&mut self) -> Result<Option<PathBuf>, HgError> {
1150 1150 assert!(!self.is_open());
1151 1151 if self.is_inline() {
1152 1152 return Err(HgError::abort(
1153 1153 "revlog with delayed write should not be inline",
1154 1154 exit_codes::ABORT,
1155 1155 None,
1156 1156 ));
1157 1157 }
1158 1158 if self.delayed_buffer.is_some() || self.original_index_file.is_some()
1159 1159 {
1160 1160 // Delay or divert already happening
1161 1161 return Ok(None);
1162 1162 }
1163 1163 if self.is_empty() {
1164 1164 self.original_index_file = Some(self.index_file.to_owned());
1165 1165 self.index_file = self.diverted_index();
1166 1166 if self.vfs.exists(&self.index_file) {
1167 1167 self.vfs.unlink(&self.index_file)?;
1168 1168 }
1169 1169 Ok(Some(self.index_file.to_owned()))
1170 1170 } else {
1171 1171 self.delayed_buffer =
1172 1172 Some(Arc::new(Mutex::new(DelayedBuffer::default())));
1173 1173 Ok(None)
1174 1174 }
1175 1175 }
1176 1176
1177 1177 /// Write the pending data (in memory) if any to the diverted index file
1178 1178 /// (on disk temporary file)
1179 1179 pub fn write_pending(
1180 1180 &mut self,
1181 1181 ) -> Result<(Option<PathBuf>, bool), HgError> {
1182 1182 assert!(!self.is_open());
1183 1183 if self.is_inline() {
1184 1184 return Err(HgError::abort(
1185 1185 "revlog with delayed write should not be inline",
1186 1186 exit_codes::ABORT,
1187 1187 None,
1188 1188 ));
1189 1189 }
1190 1190 if self.original_index_file.is_some() {
1191 1191 return Ok((None, true));
1192 1192 }
1193 1193 let mut any_pending = false;
1194 1194 let pending_index_file = self.diverted_index();
1195 1195 if self.vfs.exists(&pending_index_file) {
1196 1196 self.vfs.unlink(&pending_index_file)?;
1197 1197 }
1198 1198 self.vfs.copy(&self.index_file, &pending_index_file)?;
1199 1199 if let Some(delayed_buffer) = self.delayed_buffer.take() {
1200 1200 let mut index_file_handle =
1201 1201 self.vfs.open_write(&pending_index_file)?;
1202 1202 index_file_handle
1203 1203 .seek(SeekFrom::End(0))
1204 1204 .when_writing_file(&pending_index_file)?;
1205 1205 let delayed_data =
1206 1206 &delayed_buffer.lock().expect("propagate the panic").buffer;
1207 1207 index_file_handle
1208 1208 .write_all(delayed_data)
1209 1209 .when_writing_file(&pending_index_file)?;
1210 1210 any_pending = true;
1211 1211 }
1212 1212 self.original_index_file = Some(self.index_file.to_owned());
1213 1213 self.index_file = pending_index_file;
1214 1214 Ok((Some(self.index_file.to_owned()), any_pending))
1215 1215 }
1216 1216
1217 1217 /// Overwrite the canonical file with the diverted file, or write out the
1218 1218 /// delayed buffer.
1219 1219 /// Returns an error if the revlog is neither diverted nor delayed.
1220 1220 pub fn finalize_pending(&mut self) -> Result<PathBuf, HgError> {
1221 1221 assert!(!self.is_open());
1222 1222 if self.is_inline() {
1223 1223 return Err(HgError::abort(
1224 1224 "revlog with delayed write should not be inline",
1225 1225 exit_codes::ABORT,
1226 1226 None,
1227 1227 ));
1228 1228 }
1229 1229 match (
1230 1230 self.delayed_buffer.as_ref(),
1231 1231 self.original_index_file.as_ref(),
1232 1232 ) {
1233 1233 (None, None) => {
1234 1234 return Err(HgError::abort(
1235 1235 "neither delay nor divert found on this revlog",
1236 1236 exit_codes::ABORT,
1237 1237 None,
1238 1238 ));
1239 1239 }
1240 1240 (Some(delay), None) => {
1241 1241 let mut index_file_handle =
1242 1242 self.vfs.open_write(&self.index_file)?;
1243 1243 index_file_handle
1244 1244 .seek(SeekFrom::End(0))
1245 1245 .when_writing_file(&self.index_file)?;
1246 1246 index_file_handle
1247 1247 .write_all(
1248 1248 &delay.lock().expect("propagate the panic").buffer,
1249 1249 )
1250 1250 .when_writing_file(&self.index_file)?;
1251 1251 self.delayed_buffer = None;
1252 1252 }
1253 1253 (None, Some(divert)) => {
1254 1254 if self.vfs.exists(&self.index_file) {
1255 1255 self.vfs.rename(&self.index_file, divert, true)?;
1256 1256 }
1257 1257 divert.clone_into(&mut self.index_file);
1258 1258 self.original_index_file = None;
1259 1259 }
1260 1260 (Some(_), Some(_)) => unreachable!(
1261 1261 "{} is in an inconsistent state of both delay and divert",
1262 1262 self.canonical_index_file().display(),
1263 1263 ),
1264 1264 }
1265 1265 Ok(self.canonical_index_file())
1266 1266 }
1267 1267
1268 1268 /// `pub` only for `hg-cpython`. This is made a different method than
1269 1269 /// [`Revlog::index`] in case there is a different invariant that pops up
1270 1270 /// later.
1271 1271 #[doc(hidden)]
1272 1272 pub fn shared_index(&self) -> &Index {
1273 1273 &self.index
1274 1274 }
1275 1275 }
1276 1276
1277 1277 /// The use of a [`RefCell`] assumes that a given revlog will only
1278 1278 /// be accessed (read or write) by a single thread.
1279 1279 type UncompressedChunkCache =
1280 1280 RefCell<LruMap<Revision, Arc<[u8]>, ByMemoryUsage>>;
1281 1281
1282 1282 /// The node, revision and data for the last revision we've seen. Speeds up
1283 1283 /// a lot of sequential operations of the revlog.
1284 1284 ///
1285 1285 /// The data is not just bytes since it can come from Python and we want to
1286 1286 /// avoid copies if possible.
1287 1287 type SingleRevisionCache =
1288 1288 (Node, Revision, Box<dyn Deref<Target = [u8]> + Send>);
1289 1289
1290 1290 /// A way of progressively filling a buffer with revision data, then return
1291 1291 /// that buffer. Used to abstract away Python-allocated code to reduce copying
1292 1292 /// for performance reasons.
1293 1293 pub trait RevisionBuffer {
1294 1294 /// The owned buffer type to return
1295 1295 type Target;
1296 1296 /// Copies the slice into the buffer
1297 1297 fn extend_from_slice(&mut self, slice: &[u8]);
1298 1298 /// Returns the now finished owned buffer
1299 1299 fn finish(self) -> Self::Target;
1300 1300 }
1301 1301
1302 1302 /// A simple vec-based buffer. This is uselessly complicated for the pure Rust
1303 1303 /// case, but it's the price to pay for Python compatibility.
1304 1304 #[derive(Debug)]
1305 1305 pub(super) struct CoreRevisionBuffer {
1306 1306 buf: Vec<u8>,
1307 1307 }
1308 1308
1309 1309 impl CoreRevisionBuffer {
1310 1310 pub fn new() -> Self {
1311 1311 Self { buf: vec![] }
1312 1312 }
1313 1313
1314 1314 #[inline]
1315 1315 pub fn resize(&mut self, size: usize) {
1316 1316 self.buf.reserve_exact(size - self.buf.capacity());
1317 1317 }
1318 1318 }
1319 1319
1320 1320 impl RevisionBuffer for CoreRevisionBuffer {
1321 1321 type Target = Vec<u8>;
1322 1322
1323 1323 #[inline]
1324 1324 fn extend_from_slice(&mut self, slice: &[u8]) {
1325 1325 self.buf.extend_from_slice(slice);
1326 1326 }
1327 1327
1328 1328 #[inline]
1329 1329 fn finish(self) -> Self::Target {
1330 1330 self.buf
1331 1331 }
1332 1332 }
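
/// Editor's illustrative sketch, not part of the original change: the
/// pure-Rust buffer is filled incrementally and then turned into an owned
/// `Vec<u8>` through the `RevisionBuffer` trait.
#[allow(dead_code)]
fn example_fill_revision_buffer(pieces: &[&[u8]]) -> Vec<u8> {
    let mut buffer = CoreRevisionBuffer::new();
    for piece in pieces {
        buffer.extend_from_slice(piece);
    }
    buffer.finish()
}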
1333 1333
1334 1334 /// Calculate the hash of a revision given its data and its parents.
1335 1335 pub fn hash(
1336 1336 data: &[u8],
1337 1337 p1_hash: &[u8],
1338 1338 p2_hash: &[u8],
1339 1339 ) -> [u8; NODE_BYTES_LENGTH] {
1340 1340 let mut hasher = Sha1::new();
1341 1341 let (a, b) = (p1_hash, p2_hash);
1342 1342 if a > b {
1343 1343 hasher.update(b);
1344 1344 hasher.update(a);
1345 1345 } else {
1346 1346 hasher.update(a);
1347 1347 hasher.update(b);
1348 1348 }
1349 1349 hasher.update(data);
1350 1350 *hasher.finalize().as_ref()
1351 1351 }
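
/// Editor's illustrative sketch, not part of the original change: because the
/// two parent hashes are fed to SHA-1 in sorted order, the computed node does
/// not depend on which parent is p1 and which is p2.
#[allow(dead_code)]
fn example_hash_is_parent_order_independent(
    data: &[u8],
    p1: &[u8; NODE_BYTES_LENGTH],
    p2: &[u8; NODE_BYTES_LENGTH],
) -> bool {
    hash(data, p1, p2) == hash(data, p2, p1)
}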