rust-index: add Sync bound to all relevant mmap-derived values...
Raphaël Gomès
r52126:ca81cd96 default
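The patch widens the trait object behind the index's mmap-derived bytes from `Send` to `Send + Sync`, so that a borrowed `Index` can be used from several threads at once. A minimal sketch of the property this enables (hypothetical usage, not part of the patch):

fn assert_sync<T: Sync>(_: &T) {}

fn share_index(index: &Index) {
    // Compiles only because `Box<dyn Deref<Target = [u8]> + Send + Sync>`
    // makes the containing `Index` type `Sync` as well.
    assert_sync(index);
    std::thread::scope(|s| {
        s.spawn(|| index.len());
        s.spawn(|| index.is_inline());
    });
}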
@@ -1,1975 +1,1975 @@
1 1 use std::collections::hash_map::RandomState;
2 2 use std::collections::{HashMap, HashSet};
3 3 use std::fmt::Debug;
4 4 use std::ops::Deref;
5 5 use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
6 6
7 7 use byteorder::{BigEndian, ByteOrder};
8 8 use bytes_cast::{unaligned, BytesCast};
9 9
10 10 use super::REVIDX_KNOWN_FLAGS;
11 11 use crate::errors::HgError;
12 12 use crate::node::{NODE_BYTES_LENGTH, NULL_NODE, STORED_NODE_ID_BYTES};
13 13 use crate::revlog::node::Node;
14 14 use crate::revlog::{Revision, NULL_REVISION};
15 15 use crate::{
16 16 dagops, BaseRevision, FastHashMap, Graph, GraphError, RevlogError,
17 17 RevlogIndex, UncheckedRevision,
18 18 };
19 19
20 20 pub const INDEX_ENTRY_SIZE: usize = 64;
21 21 pub const COMPRESSION_MODE_INLINE: u8 = 2;
22 22
23 23 #[derive(Debug)]
24 24 pub struct IndexHeader {
25 25 pub(super) header_bytes: [u8; 4],
26 26 }
27 27
28 28 #[derive(Copy, Clone)]
29 29 pub struct IndexHeaderFlags {
30 30 flags: u16,
31 31 }
32 32
33 33 /// Corresponds to the high bits of `_format_flags` in python
34 34 impl IndexHeaderFlags {
35 35 /// Corresponds to FLAG_INLINE_DATA in python
36 36 pub fn is_inline(self) -> bool {
37 37 self.flags & 1 != 0
38 38 }
39 39 /// Corresponds to FLAG_GENERALDELTA in python
40 40 pub fn uses_generaldelta(self) -> bool {
41 41 self.flags & 2 != 0
42 42 }
43 43 }
44 44
45 45 /// Corresponds to the INDEX_HEADER structure,
46 46 /// which is parsed as a `header` variable in `_loadindex` in `revlog.py`
47 47 impl IndexHeader {
48 48 fn format_flags(&self) -> IndexHeaderFlags {
49 49 // No "unknown flags" check here, unlike in python. Maybe there should
50 50 // be.
51 51 IndexHeaderFlags {
52 52 flags: BigEndian::read_u16(&self.header_bytes[0..2]),
53 53 }
54 54 }
55 55
56 56 /// The only revlog version currently supported by rhg.
57 57 const REVLOGV1: u16 = 1;
58 58
59 59 /// Corresponds to `_format_version` in Python.
60 60 fn format_version(&self) -> u16 {
61 61 BigEndian::read_u16(&self.header_bytes[2..4])
62 62 }
63 63
64 64 pub fn parse(index_bytes: &[u8]) -> Result<Option<IndexHeader>, HgError> {
65 65 if index_bytes.is_empty() {
66 66 return Ok(None);
67 67 }
68 68 if index_bytes.len() < 4 {
69 69 return Err(HgError::corrupted(
70 70 "corrupted revlog: can't read the index format header",
71 71 ));
72 72 }
73 73 Ok(Some(IndexHeader {
74 74 header_bytes: {
75 75 let bytes: [u8; 4] =
76 76 index_bytes[0..4].try_into().expect("impossible");
77 77 bytes
78 78 },
79 79 }))
80 80 }
81 81 }
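// Illustration (not part of the original file): the 4 header bytes pack the
// format flags in the first two bytes and the revlog version in the last
// two, so an inline + generaldelta v1 revlog header reads `[0, 3, 0, 1]`.
#[cfg(test)]
fn _header_layout_example() {
    let header = IndexHeader::parse(&[0u8, 3, 0, 1])
        .expect("4 bytes is long enough")
        .expect("input is not empty");
    assert!(header.format_flags().is_inline());
    assert!(header.format_flags().uses_generaldelta());
    assert_eq!(header.format_version(), 1);
}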
82 82
83 83 /// Abstracts the access to the index bytes since they can be spread between
84 84 /// the immutable (bytes) part and the mutable (added) part if any appends
85 85 /// happened. This makes it transparent to callers.
86 86 struct IndexData {
87 87 /// Immutable bytes, most likely taken from disk
88 bytes: Box<dyn Deref<Target = [u8]> + Send>,
88 bytes: Box<dyn Deref<Target = [u8]> + Send + Sync>,
89 89 /// Used when stripping index contents, keeps track of the start of the
90 90 /// first stripped revision, which is used to give a slice of the
91 91 /// `bytes` field.
92 92 truncation: Option<usize>,
93 93 /// Bytes that were added after reading the index
94 94 added: Vec<u8>,
95 95 }
96 96
97 97 impl IndexData {
98 pub fn new(bytes: Box<dyn Deref<Target = [u8]> + Send>) -> Self {
98 pub fn new(bytes: Box<dyn Deref<Target = [u8]> + Send + Sync>) -> Self {
99 99 Self {
100 100 bytes,
101 101 truncation: None,
102 102 added: vec![],
103 103 }
104 104 }
105 105
106 106 pub fn len(&self) -> usize {
107 107 match self.truncation {
108 108 Some(truncation) => truncation + self.added.len(),
109 109 None => self.bytes.len() + self.added.len(),
110 110 }
111 111 }
112 112
113 113 fn remove(
114 114 &mut self,
115 115 rev: Revision,
116 116 offsets: Option<&[usize]>,
117 117 ) -> Result<(), RevlogError> {
118 118 let rev = rev.0 as usize;
119 119 let truncation = if let Some(offsets) = offsets {
120 120 offsets[rev]
121 121 } else {
122 122 rev * INDEX_ENTRY_SIZE
123 123 };
124 124 if truncation < self.bytes.len() {
125 125 self.truncation = Some(truncation);
126 126 self.added.clear();
127 127 } else {
128 128 self.added.truncate(truncation - self.bytes.len());
129 129 }
130 130 Ok(())
131 131 }
132 132
133 133 fn is_new(&self) -> bool {
134 134 self.bytes.is_empty()
135 135 }
136 136 }
137 137
138 138 impl std::ops::Index<std::ops::Range<usize>> for IndexData {
139 139 type Output = [u8];
140 140
141 141 fn index(&self, index: std::ops::Range<usize>) -> &Self::Output {
142 142 let start = index.start;
143 143 let end = index.end;
144 144 let immutable_len = match self.truncation {
145 145 Some(truncation) => truncation,
146 146 None => self.bytes.len(),
147 147 };
148 148 if start < immutable_len {
149 149 if end > immutable_len {
150 150 panic!("index data cannot span existing and added ranges");
151 151 }
152 152 &self.bytes[index]
153 153 } else {
154 154 &self.added[start - immutable_len..end - immutable_len]
155 155 }
156 156 }
157 157 }
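// Sketch (illustrative only, not part of the original file): ranges resolve
// either against the immutable, possibly mmap-backed `bytes`, or against
// `added`, but never across the boundary between the two.
#[cfg(test)]
fn _index_data_example() {
    let mut data = IndexData::new(Box::new(vec![0u8; 64]));
    data.added.extend_from_slice(&[1u8; 64]);
    assert_eq!(data.len(), 128);
    assert_eq!(&data[0..64], &[0u8; 64][..]); // served from `bytes`
    assert_eq!(&data[64..128], &[1u8; 64][..]); // served from `added`
}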
158 158
159 159 #[derive(Debug, PartialEq, Eq)]
160 160 pub struct RevisionDataParams {
161 161 pub flags: u16,
162 162 pub data_offset: u64,
163 163 pub data_compressed_length: i32,
164 164 pub data_uncompressed_length: i32,
165 165 pub data_delta_base: i32,
166 166 pub link_rev: i32,
167 167 pub parent_rev_1: i32,
168 168 pub parent_rev_2: i32,
169 169 pub node_id: [u8; NODE_BYTES_LENGTH],
170 170 pub _sidedata_offset: u64,
171 171 pub _sidedata_compressed_length: i32,
172 172 pub data_compression_mode: u8,
173 173 pub _sidedata_compression_mode: u8,
174 174 pub _rank: i32,
175 175 }
176 176
177 177 impl Default for RevisionDataParams {
178 178 fn default() -> Self {
179 179 Self {
180 180 flags: 0,
181 181 data_offset: 0,
182 182 data_compressed_length: 0,
183 183 data_uncompressed_length: 0,
184 184 data_delta_base: -1,
185 185 link_rev: -1,
186 186 parent_rev_1: -1,
187 187 parent_rev_2: -1,
188 188 node_id: [0; NODE_BYTES_LENGTH],
189 189 _sidedata_offset: 0,
190 190 _sidedata_compressed_length: 0,
191 191 data_compression_mode: COMPRESSION_MODE_INLINE,
192 192 _sidedata_compression_mode: COMPRESSION_MODE_INLINE,
193 193 _rank: -1,
194 194 }
195 195 }
196 196 }
197 197
198 198 #[derive(BytesCast)]
199 199 #[repr(C)]
200 200 pub struct RevisionDataV1 {
201 201 data_offset_or_flags: unaligned::U64Be,
202 202 data_compressed_length: unaligned::I32Be,
203 203 data_uncompressed_length: unaligned::I32Be,
204 204 data_delta_base: unaligned::I32Be,
205 205 link_rev: unaligned::I32Be,
206 206 parent_rev_1: unaligned::I32Be,
207 207 parent_rev_2: unaligned::I32Be,
208 208 node_id: [u8; STORED_NODE_ID_BYTES],
209 209 }
210 210
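/// Compile-time size check: `transmute` only type-checks between types of
/// identical size, so this forces `RevisionDataV1` to be exactly 64 bytes
/// ([`INDEX_ENTRY_SIZE`]).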
211 211 fn _static_assert_size_of_revision_data_v1() {
212 212 let _ = std::mem::transmute::<RevisionDataV1, [u8; 64]>;
213 213 }
214 214
215 215 impl RevisionDataParams {
216 216 pub fn validate(&self) -> Result<(), RevlogError> {
217 217 if self.flags & !REVIDX_KNOWN_FLAGS != 0 {
218 218 return Err(RevlogError::corrupted(format!(
219 219 "unknown revlog index flags: {}",
220 220 self.flags
221 221 )));
222 222 }
223 223 if self.data_compression_mode != COMPRESSION_MODE_INLINE {
224 224 return Err(RevlogError::corrupted(format!(
225 225 "invalid data compression mode: {}",
226 226 self.data_compression_mode
227 227 )));
228 228 }
229 229 // FIXME isn't this only for v2 or changelog v2?
230 230 if self._sidedata_compression_mode != COMPRESSION_MODE_INLINE {
231 231 return Err(RevlogError::corrupted(format!(
232 232 "invalid sidedata compression mode: {}",
233 233 self._sidedata_compression_mode
234 234 )));
235 235 }
236 236 Ok(())
237 237 }
238 238
239 239 pub fn into_v1(self) -> RevisionDataV1 {
240 240 let data_offset_or_flags = self.data_offset << 16 | self.flags as u64;
241 241 let mut node_id = [0; STORED_NODE_ID_BYTES];
242 242 node_id[..NODE_BYTES_LENGTH].copy_from_slice(&self.node_id);
243 243 RevisionDataV1 {
244 244 data_offset_or_flags: data_offset_or_flags.into(),
245 245 data_compressed_length: self.data_compressed_length.into(),
246 246 data_uncompressed_length: self.data_uncompressed_length.into(),
247 247 data_delta_base: self.data_delta_base.into(),
248 248 link_rev: self.link_rev.into(),
249 249 parent_rev_1: self.parent_rev_1.into(),
250 250 parent_rev_2: self.parent_rev_2.into(),
251 251 node_id,
252 252 }
253 253 }
254 254 }
255 255
256 256 /// A Revlog index
257 257 pub struct Index {
258 258 bytes: IndexData,
259 259 /// Offsets of starts of index blocks.
260 260 /// Only needed when the index is interleaved with data.
261 261 offsets: RwLock<Option<Vec<usize>>>,
262 262 uses_generaldelta: bool,
263 263 is_inline: bool,
264 264 /// Cache of the head revisions in this index, kept in sync. Should
265 265 /// be accessed via the [`Self::head_revs`] method.
266 266 head_revs: Vec<Revision>,
267 267 /// Cache of the last filtered revisions in this index, used to make sure
268 268 /// we haven't changed filters when returning the cached `head_revs`.
269 269 filtered_revs: HashSet<Revision>,
270 270 }
271 271
272 272 impl Debug for Index {
273 273 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
274 274 f.debug_struct("Index")
275 275 .field("offsets", &self.offsets)
276 276 .field("uses_generaldelta", &self.uses_generaldelta)
277 277 .finish()
278 278 }
279 279 }
280 280
281 281 impl Graph for Index {
282 282 fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
283 283 let err = || GraphError::ParentOutOfRange(rev);
284 284 match self.get_entry(rev) {
285 285 Some(entry) => {
286 286 // The C implementation checks that the parents are valid
287 287 // before returning
288 288 Ok([
289 289 self.check_revision(entry.p1()).ok_or_else(err)?,
290 290 self.check_revision(entry.p2()).ok_or_else(err)?,
291 291 ])
292 292 }
293 293 None => Ok([NULL_REVISION, NULL_REVISION]),
294 294 }
295 295 }
296 296 }
297 297
298 298 /// A cache suitable for find_snapshots
299 299 ///
300 300 /// Logically equivalent to a mapping whose keys are [`BaseRevision`] and
301 301 /// values sets of [`BaseRevision`]
302 302 ///
303 303 /// TODO the dubious part is insisting that errors must be RevlogError
304 304 /// we would probably need to sprinkle some magic here, such as an associated
305 305 /// type that would be Into<RevlogError> but even that would not be
306 306 /// satisfactory, as errors potentially have nothing to do with the revlog.
307 307 pub trait SnapshotsCache {
308 308 fn insert_for(
309 309 &mut self,
310 310 rev: BaseRevision,
311 311 value: BaseRevision,
312 312 ) -> Result<(), RevlogError>;
313 313 }
314 314
315 315 impl SnapshotsCache for FastHashMap<BaseRevision, HashSet<BaseRevision>> {
316 316 fn insert_for(
317 317 &mut self,
318 318 rev: BaseRevision,
319 319 value: BaseRevision,
320 320 ) -> Result<(), RevlogError> {
321 321 let all_values = self.entry(rev).or_insert_with(HashSet::new);
322 322 all_values.insert(value);
323 323 Ok(())
324 324 }
325 325 }
326 326
327 327 impl Index {
328 328 /// Create an index from bytes.
329 329 /// Calculate the start of each entry when is_inline is true.
330 330 pub fn new(
331 bytes: Box<dyn Deref<Target = [u8]> + Send>,
331 bytes: Box<dyn Deref<Target = [u8]> + Send + Sync>,
332 332 default_header: IndexHeader,
333 333 ) -> Result<Self, HgError> {
334 334 let header =
335 335 IndexHeader::parse(bytes.as_ref())?.unwrap_or(default_header);
336 336
337 337 if header.format_version() != IndexHeader::REVLOGV1 {
338 338 // A proper new version should have had a repo/store
339 339 // requirement.
340 340 return Err(HgError::corrupted("unsupported revlog version"));
341 341 }
342 342
343 343 // This is only correct because we know version is REVLOGV1.
344 344 // In v2 we always use generaldelta, while in v0 we never use
345 345 // generaldelta. Similar for [is_inline] (it's only used in v1).
346 346 let uses_generaldelta = header.format_flags().uses_generaldelta();
347 347
348 348 if header.format_flags().is_inline() {
349 349 let mut offset: usize = 0;
350 350 let mut offsets = Vec::new();
351 351
352 352 while offset + INDEX_ENTRY_SIZE <= bytes.len() {
353 353 offsets.push(offset);
354 354 let end = offset + INDEX_ENTRY_SIZE;
355 355 let entry = IndexEntry {
356 356 bytes: &bytes[offset..end],
357 357 offset_override: None,
358 358 };
359 359
360 360 offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
361 361 }
362 362
363 363 if offset == bytes.len() {
364 364 Ok(Self {
365 365 bytes: IndexData::new(bytes),
366 366 offsets: RwLock::new(Some(offsets)),
367 367 uses_generaldelta,
368 368 is_inline: true,
369 369 head_revs: vec![],
370 370 filtered_revs: HashSet::new(),
371 371 })
372 372 } else {
373 373 Err(HgError::corrupted("unexpected inline revlog length"))
374 374 }
375 375 } else {
376 376 Ok(Self {
377 377 bytes: IndexData::new(bytes),
378 378 offsets: RwLock::new(None),
379 379 uses_generaldelta,
380 380 is_inline: false,
381 381 head_revs: vec![],
382 382 filtered_revs: HashSet::new(),
383 383 })
384 384 }
385 385 }
386 386
387 387 pub fn uses_generaldelta(&self) -> bool {
388 388 self.uses_generaldelta
389 389 }
390 390
391 391 /// Value of the inline flag.
392 392 pub fn is_inline(&self) -> bool {
393 393 self.is_inline
394 394 }
395 395
396 396 /// Return a slice of bytes if `revlog` is inline. Panic if not.
397 397 pub fn data(&self, start: usize, end: usize) -> &[u8] {
398 398 if !self.is_inline() {
399 399 panic!("tried to access data in the index of a revlog that is not inline");
400 400 }
401 401 &self.bytes[start..end]
402 402 }
403 403
404 404 /// Return number of entries of the revlog index.
405 405 pub fn len(&self) -> usize {
406 406 if let Some(offsets) = &*self.get_offsets() {
407 407 offsets.len()
408 408 } else {
409 409 self.bytes.len() / INDEX_ENTRY_SIZE
410 410 }
411 411 }
412 412
413 413 pub fn get_offsets(&self) -> RwLockReadGuard<Option<Vec<usize>>> {
414 414 if self.is_inline() {
415 415 {
416 416 // Scope the write guard so it is dropped before the read lock below
417 417 // TODO perf?
418 418 let mut offsets = self.offsets.write().unwrap();
419 419 if offsets.is_none() {
420 420 offsets.replace(inline_scan(&self.bytes.bytes).1);
421 421 }
422 422 }
423 423 }
424 424 self.offsets.read().unwrap()
425 425 }
426 426
427 427 pub fn get_offsets_mut(&mut self) -> RwLockWriteGuard<Option<Vec<usize>>> {
428 428 let mut offsets = self.offsets.write().unwrap();
429 429 if self.is_inline() && offsets.is_none() {
430 430 offsets.replace(inline_scan(&self.bytes.bytes).1);
431 431 }
432 432 offsets
433 433 }
434 434
435 435 /// Returns `true` if the `Index` has zero `entries`.
436 436 pub fn is_empty(&self) -> bool {
437 437 self.len() == 0
438 438 }
439 439
440 440 /// Return the index entry corresponding to the given revision or `None`
441 441 /// for [`NULL_REVISION`]
442 442 ///
443 443 /// Since the specified revision is of the checked type, it always
444 444 /// exists if it was validated by this index.
445 445 pub fn get_entry(&self, rev: Revision) -> Option<IndexEntry> {
446 446 if rev == NULL_REVISION {
447 447 return None;
448 448 }
449 449 Some(if let Some(offsets) = &*self.get_offsets() {
450 450 self.get_entry_inline(rev, offsets.as_ref())
451 451 } else {
452 452 self.get_entry_separated(rev)
453 453 })
454 454 }
455 455
456 456 /// Return the binary content of the index entry for the given revision
457 457 ///
458 458 /// See [get_entry()](`Self::get_entry()`) for cases when `None` is
459 459 /// returned.
460 460 pub fn entry_binary(&self, rev: Revision) -> Option<&[u8]> {
461 461 self.get_entry(rev).map(|e| {
462 462 let bytes = e.as_bytes();
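// Entry 0 is special: on disk its first 4 bytes are taken over by the
// index header (rev 0's offset is always 0, so the space is free), hence
// they are not part of the entry's own binary content; callers can
// re-create them with `pack_header`.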
463 463 if rev.0 == 0 {
464 464 &bytes[4..]
465 465 } else {
466 466 bytes
467 467 }
468 468 })
469 469 }
470 470
471 471 pub fn entry_as_params(
472 472 &self,
473 473 rev: UncheckedRevision,
474 474 ) -> Option<RevisionDataParams> {
475 475 let rev = self.check_revision(rev)?;
476 476 self.get_entry(rev).map(|e| RevisionDataParams {
477 477 flags: e.flags(),
478 478 data_offset: if rev.0 == 0 && !self.bytes.is_new() {
479 479 e.flags() as u64
480 480 } else {
481 481 e.raw_offset()
482 482 },
483 483 data_compressed_length: e.compressed_len().try_into().unwrap(),
484 484 data_uncompressed_length: e.uncompressed_len(),
485 485 data_delta_base: e.base_revision_or_base_of_delta_chain().0,
486 486 link_rev: e.link_revision().0,
487 487 parent_rev_1: e.p1().0,
488 488 parent_rev_2: e.p2().0,
489 489 node_id: e.hash().as_bytes().try_into().unwrap(),
490 490 ..Default::default()
491 491 })
492 492 }
493 493
494 494 fn get_entry_inline(
495 495 &self,
496 496 rev: Revision,
497 497 offsets: &[usize],
498 498 ) -> IndexEntry {
499 499 let start = offsets[rev.0 as usize];
500 500 let end = start + INDEX_ENTRY_SIZE;
501 501 let bytes = &self.bytes[start..end];
502 502
503 503 // See IndexEntry for an explanation of this override.
504 504 let offset_override = Some(end);
505 505
506 506 IndexEntry {
507 507 bytes,
508 508 offset_override,
509 509 }
510 510 }
511 511
512 512 fn get_entry_separated(&self, rev: Revision) -> IndexEntry {
513 513 let start = rev.0 as usize * INDEX_ENTRY_SIZE;
514 514 let end = start + INDEX_ENTRY_SIZE;
515 515 let bytes = &self.bytes[start..end];
516 516
517 517 // Override the offset of the first revision as its bytes are used
518 518 // for the index's metadata (saving space because it is always 0)
519 519 let offset_override = if rev == Revision(0) { Some(0) } else { None };
520 520
521 521 IndexEntry {
522 522 bytes,
523 523 offset_override,
524 524 }
525 525 }
526 526
527 527 fn null_entry(&self) -> IndexEntry {
528 528 IndexEntry {
529 529 bytes: &[0; INDEX_ENTRY_SIZE],
530 530 offset_override: Some(0),
531 531 }
532 532 }
533 533
534 534 /// Return the head revisions of this index
535 535 pub fn head_revs(&mut self) -> Result<Vec<Revision>, GraphError> {
536 536 self.head_revs_filtered(&HashSet::new())
537 537 }
538 538
539 539 /// Return the head revisions of this index
540 540 pub fn head_revs_filtered(
541 541 &mut self,
542 542 filtered_revs: &HashSet<Revision>,
543 543 ) -> Result<Vec<Revision>, GraphError> {
544 544 if !self.head_revs.is_empty() && filtered_revs == &self.filtered_revs {
545 545 return Ok(self.head_revs.to_owned());
546 546 }
547 547 let mut revs: HashSet<Revision, RandomState> =
548 548 if filtered_revs.is_empty() {
549 549 (0..self.len())
550 550 .into_iter()
551 551 .map(|i| Revision(i as BaseRevision))
552 552 .collect()
553 553 } else {
554 554 (0..self.len())
555 555 .into_iter()
556 556 .filter_map(|i| {
557 557 let r = Revision(i as BaseRevision);
558 558 if filtered_revs.contains(&r) {
559 559 None
560 560 } else {
561 561 Some(r)
562 562 }
563 563 })
564 564 .collect()
565 565 };
566 566 dagops::retain_heads(self, &mut revs)?;
567 567 if self.is_empty() {
568 568 revs.insert(NULL_REVISION);
569 569 }
570 570 let mut as_vec: Vec<Revision> =
571 571 revs.into_iter().map(Into::into).collect();
572 572 as_vec.sort_unstable();
573 573 self.head_revs = as_vec.to_owned();
574 574 self.filtered_revs = filtered_revs.to_owned();
575 575 Ok(as_vec)
576 576 }
577 577
578 578 /// Obtain the delta chain for a revision.
579 579 ///
580 580 /// `stop_rev` specifies a revision to stop at. If not specified, we
581 581 /// stop at the base of the chain.
582 582 ///
583 583 /// Returns a 2-tuple of (chain, stopped) where `chain` is a vec of
584 584 /// revs in ascending order and `stopped` is a bool indicating whether
585 585 /// `stop_rev` was hit.
586 586 pub fn delta_chain(
587 587 &self,
588 588 rev: Revision,
589 589 stop_rev: Option<Revision>,
590 590 ) -> Result<(Vec<Revision>, bool), HgError> {
591 591 let mut current_rev = rev;
592 592 let mut entry = self.get_entry(rev).unwrap();
593 593 let mut chain = vec![];
594 594 while current_rev.0 != entry.base_revision_or_base_of_delta_chain().0
595 595 && stop_rev.map(|r| r != current_rev).unwrap_or(true)
596 596 {
597 597 chain.push(current_rev);
598 598 let new_rev = if self.uses_generaldelta() {
599 599 entry.base_revision_or_base_of_delta_chain()
600 600 } else {
601 601 UncheckedRevision(current_rev.0 - 1)
602 602 };
603 603 current_rev = self.check_revision(new_rev).ok_or_else(|| {
604 604 HgError::corrupted(format!("Revision {new_rev} out of range"))
605 605 })?;
606 606 if current_rev.0 == NULL_REVISION.0 {
607 607 break;
608 608 }
609 609 entry = self.get_entry(current_rev).unwrap()
610 610 }
611 611
612 612 let stopped = if stop_rev.map(|r| current_rev == r).unwrap_or(false) {
613 613 true
614 614 } else {
615 615 chain.push(current_rev);
616 616 false
617 617 };
618 618 chain.reverse();
619 619 Ok((chain, stopped))
620 620 }
621 621
622 622 pub fn find_snapshots(
623 623 &self,
624 624 start_rev: UncheckedRevision,
625 625 end_rev: UncheckedRevision,
626 626 cache: &mut impl SnapshotsCache,
627 627 ) -> Result<(), RevlogError> {
628 628 let mut start_rev = start_rev.0;
629 629 let mut end_rev = end_rev.0;
630 630 end_rev += 1;
631 631 let len = self.len().try_into().unwrap();
632 632 if end_rev > len {
633 633 end_rev = len;
634 634 }
635 635 if start_rev < 0 {
636 636 start_rev = 0;
637 637 }
638 638 for rev in start_rev..end_rev {
639 639 if !self.is_snapshot_unchecked(Revision(rev))? {
640 640 continue;
641 641 }
642 642 let mut base = self
643 643 .get_entry(Revision(rev))
644 644 .unwrap()
645 645 .base_revision_or_base_of_delta_chain();
646 646 if base.0 == rev {
647 647 base = NULL_REVISION.into();
648 648 }
649 649 cache.insert_for(base.0, rev)?;
650 650 }
651 651 Ok(())
652 652 }
653 653
654 654 /// TODO move this to the trait probably, along with other things
655 655 pub fn append(
656 656 &mut self,
657 657 revision_data: RevisionDataParams,
658 658 ) -> Result<(), RevlogError> {
659 659 revision_data.validate()?;
660 660 let new_offset = self.bytes.len();
661 661 if let Some(offsets) = &mut *self.get_offsets_mut() {
662 662 offsets.push(new_offset)
663 663 }
664 664 self.bytes.added.extend(revision_data.into_v1().as_bytes());
665 665 self.head_revs.clear();
666 666 Ok(())
667 667 }
668 668
669 669 pub fn pack_header(&self, header: i32) -> [u8; 4] {
670 670 header.to_be_bytes()
671 671 }
672 672
673 673 pub fn remove(&mut self, rev: Revision) -> Result<(), RevlogError> {
674 674 let offsets = self.get_offsets().clone();
675 675 self.bytes.remove(rev, offsets.as_deref())?;
676 676 if let Some(offsets) = &mut *self.get_offsets_mut() {
677 677 offsets.truncate(rev.0 as usize)
678 678 }
679 679 self.head_revs.clear();
680 680 Ok(())
681 681 }
682 682
683 683 pub fn clear_caches(&mut self) {
684 684 // We need to get the 'inline' value from Python at init and use this
685 685 // instead of offsets to determine whether we're inline since we might
686 686 // clear caches. This implies re-populating the offsets on-demand.
687 687 self.offsets = RwLock::new(None);
688 688 self.head_revs.clear();
689 689 }
690 690
691 691 /// Unchecked version of `is_snapshot`.
692 692 /// Assumes the caller checked that `rev` is within a valid revision range.
693 693 pub fn is_snapshot_unchecked(
694 694 &self,
695 695 mut rev: Revision,
696 696 ) -> Result<bool, RevlogError> {
697 697 while rev.0 >= 0 {
698 698 let entry = self.get_entry(rev).unwrap();
699 699 let mut base = entry.base_revision_or_base_of_delta_chain().0;
700 700 if base == rev.0 {
701 701 base = NULL_REVISION.0;
702 702 }
703 703 if base == NULL_REVISION.0 {
704 704 return Ok(true);
705 705 }
706 706 let [mut p1, mut p2] = self
707 707 .parents(rev)
708 708 .map_err(|_| RevlogError::InvalidRevision)?;
709 709 while let Some(p1_entry) = self.get_entry(p1) {
710 710 if p1_entry.compressed_len() != 0 || p1.0 == 0 {
711 711 break;
712 712 }
713 713 let parent_base =
714 714 p1_entry.base_revision_or_base_of_delta_chain();
715 715 if parent_base.0 == p1.0 {
716 716 break;
717 717 }
718 718 p1 = self
719 719 .check_revision(parent_base)
720 720 .ok_or(RevlogError::InvalidRevision)?;
721 721 }
722 722 while let Some(p2_entry) = self.get_entry(p2) {
723 723 if p2_entry.compressed_len() != 0 || p2.0 == 0 {
724 724 break;
725 725 }
726 726 let parent_base =
727 727 p2_entry.base_revision_or_base_of_delta_chain();
728 728 if parent_base.0 == p2.0 {
729 729 break;
730 730 }
731 731 p2 = self
732 732 .check_revision(parent_base)
733 733 .ok_or(RevlogError::InvalidRevision)?;
734 734 }
735 735 if base == p1.0 || base == p2.0 {
736 736 return Ok(false);
737 737 }
738 738 rev = self
739 739 .check_revision(base.into())
740 740 .ok_or(RevlogError::InvalidRevision)?;
741 741 }
742 742 Ok(rev == NULL_REVISION)
743 743 }
744 744
745 745 /// Return whether the given revision is a snapshot. Returns an error if
746 746 /// `rev` is not within a valid revision range.
747 747 pub fn is_snapshot(
748 748 &self,
749 749 rev: UncheckedRevision,
750 750 ) -> Result<bool, RevlogError> {
751 751 let rev = self
752 752 .check_revision(rev)
753 753 .ok_or_else(|| RevlogError::corrupted("test"))?;
754 754 self.is_snapshot_unchecked(rev)
755 755 }
756 756
757 757 /// Slice revs to reduce the amount of unrelated data to be read from disk.
758 758 ///
759 759 /// The index is sliced into groups that should be read in one time.
760 760 ///
761 761 /// The initial chunk is sliced until the overall density
762 762 /// (payload/chunks-span ratio) is above `target_density`.
763 763 /// No gap smaller than `min_gap_size` is skipped.
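/// Illustrative numbers (not from the original docs): if the revs span
/// 1000 bytes on disk but only 400 bytes of that is actual payload, the
/// density is 400/1000 = 0.4; skipping a 500-byte gap larger than
/// `min_gap_size` re-evaluates it to 400/500 = 0.8, which satisfies a
/// `target_density` of 0.5.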
764 764 pub fn slice_chunk_to_density(
765 765 &self,
766 766 revs: &[Revision],
767 767 target_density: f64,
768 768 min_gap_size: usize,
769 769 ) -> Vec<Vec<Revision>> {
770 770 if revs.is_empty() {
771 771 return vec![];
772 772 }
773 773 if revs.len() == 1 {
774 774 return vec![revs.to_owned()];
775 775 }
776 776 let delta_chain_span = self.segment_span(revs);
777 777 if delta_chain_span < min_gap_size {
778 778 return vec![revs.to_owned()];
779 779 }
780 780 let entries: Vec<_> = revs
781 781 .iter()
782 782 .map(|r| {
783 783 (*r, self.get_entry(*r).unwrap_or_else(|| self.null_entry()))
784 784 })
785 785 .collect();
786 786
787 787 let mut read_data = delta_chain_span;
788 788 let chain_payload: u32 =
789 789 entries.iter().map(|(_r, e)| e.compressed_len()).sum();
790 790 let mut density = if delta_chain_span > 0 {
791 791 chain_payload as f64 / delta_chain_span as f64
792 792 } else {
793 793 1.0
794 794 };
795 795
796 796 if density >= target_density {
797 797 return vec![revs.to_owned()];
798 798 }
799 799
800 800 // Collect the gaps so they can later be consumed from largest to smallest
801 801 let mut gaps = Vec::new();
802 802 let mut previous_end = None;
803 803
804 804 for (i, (_rev, entry)) in entries.iter().enumerate() {
805 805 let start = entry.c_start() as usize;
806 806 let length = entry.compressed_len();
807 807
808 808 // Skip empty revisions to form larger holes
809 809 if length == 0 {
810 810 continue;
811 811 }
812 812
813 813 if let Some(end) = previous_end {
814 814 let gap_size = start - end;
815 815 // Only consider holes that are large enough
816 816 if gap_size > min_gap_size {
817 817 gaps.push((gap_size, i));
818 818 }
819 819 }
820 820 previous_end = Some(start + length as usize);
821 821 }
822 822 if gaps.is_empty() {
823 823 return vec![revs.to_owned()];
824 824 }
825 825 // Sort the gaps so they can be popped from largest to smallest
826 826 gaps.sort_unstable();
827 827
828 828 // Collect the indices of the largest holes until
829 829 // the density is acceptable
830 830 let mut selected = vec![];
831 831 while let Some((gap_size, gap_id)) = gaps.pop() {
832 832 if density >= target_density {
833 833 break;
834 834 }
835 835 selected.push(gap_id);
836 836
837 837 // Since the gaps are sorted in ascending order, popping from the
838 838 // end yields the largest remaining gap
839 839 read_data -= gap_size;
840 840 density = if read_data > 0 {
841 841 chain_payload as f64 / read_data as f64
842 842 } else {
843 843 1.0
844 844 };
845 845 if density >= target_density {
846 846 break;
847 847 }
848 848 }
849 849 selected.sort_unstable();
850 850 selected.push(revs.len());
851 851
852 852 // Cut the revs at collected indices
853 853 let mut previous_idx = 0;
854 854 let mut chunks = vec![];
855 855 for idx in selected {
856 856 let chunk = self.trim_chunk(&entries, previous_idx, idx);
857 857 if !chunk.is_empty() {
858 858 chunks.push(chunk.iter().map(|(rev, _entry)| *rev).collect());
859 859 }
860 860 previous_idx = idx;
861 861 }
862 862 let chunk = self.trim_chunk(&entries, previous_idx, entries.len());
863 863 if !chunk.is_empty() {
864 864 chunks.push(chunk.iter().map(|(rev, _entry)| *rev).collect());
865 865 }
866 866
867 867 chunks
868 868 }
869 869
870 870 /// Get the byte span of a segment of sorted revisions.
871 871 ///
872 872 /// Occurrences of [`NULL_REVISION`] are ignored at the beginning of
873 873 /// the `revs` segment.
874 874 ///
875 875 /// panics:
876 876 /// - if `revs` is empty or only made of `NULL_REVISION`
877 877 /// - if the entry for the last or the first non-null element of
878 878 /// `revs` cannot be retrieved.
879 879 fn segment_span(&self, revs: &[Revision]) -> usize {
880 880 if revs.is_empty() {
881 881 return 0;
882 882 }
883 883 let last_entry = &self.get_entry(revs[revs.len() - 1]).unwrap();
884 884 let end = last_entry.c_start() + last_entry.compressed_len() as u64;
885 885 let first_rev = revs.iter().find(|r| r.0 != NULL_REVISION.0).unwrap();
886 886 let start = if (*first_rev).0 == 0 {
887 887 0
888 888 } else {
889 889 self.get_entry(*first_rev).unwrap().c_start()
890 890 };
891 891 (end - start) as usize
892 892 }
893 893
894 894 /// Returns `&revs[startidx..endidx]` without empty trailing revs
895 895 fn trim_chunk<'a>(
896 896 &'a self,
897 897 revs: &'a [(Revision, IndexEntry)],
898 898 start: usize,
899 899 mut end: usize,
900 900 ) -> &'a [(Revision, IndexEntry)] {
901 901 // Trim empty revs at the end, except the very first rev of a chain
902 902 let last_rev = revs[end - 1].0;
903 903 if last_rev.0 < self.len() as BaseRevision {
904 904 while end > 1
905 905 && end > start
906 906 && revs[end - 1].1.compressed_len() == 0
907 907 {
908 908 end -= 1
909 909 }
910 910 }
911 911 &revs[start..end]
912 912 }
913 913
914 914 /// Computes the set of revisions for each non-public phase from `roots`,
915 915 /// which are the last known roots for each non-public phase.
916 916 pub fn compute_phases_map_sets(
917 917 &self,
918 918 roots: HashMap<Phase, Vec<Revision>>,
919 919 ) -> Result<(usize, RootsPerPhase), GraphError> {
920 920 let mut phases = HashMap::new();
921 921 let mut min_phase_rev = NULL_REVISION;
922 922
923 923 for phase in Phase::non_public_phases() {
924 924 if let Some(phase_roots) = roots.get(phase) {
925 925 let min_rev =
926 926 self.add_roots_get_min(phase_roots, &mut phases, *phase);
927 927 if min_rev != NULL_REVISION
928 928 && (min_phase_rev == NULL_REVISION
929 929 || min_rev < min_phase_rev)
930 930 {
931 931 min_phase_rev = min_rev;
932 932 }
933 933 } else {
934 934 continue;
935 935 };
936 936 }
937 937 let mut phase_sets: RootsPerPhase = Default::default();
938 938
939 939 if min_phase_rev == NULL_REVISION {
940 940 min_phase_rev = Revision(self.len() as BaseRevision);
941 941 }
942 942
943 943 for rev in min_phase_rev.0..self.len() as BaseRevision {
944 944 let rev = Revision(rev);
945 945 let [p1, p2] = self.parents(rev)?;
946 946
947 947 const DEFAULT_PHASE: &Phase = &Phase::Public;
948 948 if p1.0 >= 0
949 949 && phases.get(&p1).unwrap_or(DEFAULT_PHASE)
950 950 > phases.get(&rev).unwrap_or(DEFAULT_PHASE)
951 951 {
952 952 phases.insert(rev, phases[&p1]);
953 953 }
954 954 if p2.0 >= 0
955 955 && phases.get(&p2).unwrap_or(DEFAULT_PHASE)
956 956 > phases.get(&rev).unwrap_or(DEFAULT_PHASE)
957 957 {
958 958 phases.insert(rev, phases[&p2]);
959 959 }
960 960 let set = match phases.get(&rev).unwrap_or(DEFAULT_PHASE) {
961 961 Phase::Public => continue,
962 962 phase => &mut phase_sets[*phase as usize - 1],
963 963 };
964 964 set.insert(rev);
965 965 }
966 966
967 967 Ok((self.len(), phase_sets))
968 968 }
969 969
970 970 fn add_roots_get_min(
971 971 &self,
972 972 phase_roots: &[Revision],
973 973 phases: &mut HashMap<Revision, Phase>,
974 974 phase: Phase,
975 975 ) -> Revision {
976 976 let mut min_rev = NULL_REVISION;
977 977
978 978 for root in phase_roots {
979 979 phases.insert(*root, phase);
980 980 if min_rev == NULL_REVISION || min_rev > *root {
981 981 min_rev = *root;
982 982 }
983 983 }
984 984 min_rev
985 985 }
986 986
987 987 /// Return `(heads(::(<roots> and <roots>::<heads>)))`
988 988 /// If `include_path` is `true`, return `(<roots>::<heads>)`.
989 989 ///
990 990 /// `min_root` and `roots` are unchecked since they are just used as
991 991 /// a bound or for comparison and don't need to represent a valid revision.
992 992 /// In practice, the only invalid revision passed is the working directory
993 993 /// revision ([`i32::MAX`]).
994 994 pub fn reachable_roots(
995 995 &self,
996 996 min_root: UncheckedRevision,
997 997 mut heads: Vec<Revision>,
998 998 roots: HashSet<UncheckedRevision>,
999 999 include_path: bool,
1000 1000 ) -> Result<HashSet<Revision>, GraphError> {
1001 1001 if roots.is_empty() {
1002 1002 return Ok(HashSet::new());
1003 1003 }
1004 1004 let mut reachable = HashSet::new();
1005 1005 let mut seen = HashMap::new();
1006 1006
1007 1007 while let Some(rev) = heads.pop() {
1008 1008 if roots.contains(&rev.into()) {
1009 1009 reachable.insert(rev);
1010 1010 if !include_path {
1011 1011 continue;
1012 1012 }
1013 1013 }
1014 1014 let parents = self.parents(rev)?;
1015 1015 seen.insert(rev, parents);
1016 1016 for parent in parents {
1017 1017 if parent.0 >= min_root.0 && !seen.contains_key(&parent) {
1018 1018 heads.push(parent);
1019 1019 }
1020 1020 }
1021 1021 }
1022 1022 if !include_path {
1023 1023 return Ok(reachable);
1024 1024 }
1025 1025 let mut revs: Vec<_> = seen.keys().collect();
1026 1026 revs.sort_unstable();
1027 1027 for rev in revs {
1028 1028 for parent in seen[rev] {
1029 1029 if reachable.contains(&parent) {
1030 1030 reachable.insert(*rev);
1031 1031 }
1032 1032 }
1033 1033 }
1034 1034 Ok(reachable)
1035 1035 }
1036 1036
1037 1037 /// Given a (possibly overlapping) set of revs, return all the
1038 1038 /// common ancestors heads: `heads(::args[0] and ::a[1] and ...)`
1039 1039 pub fn common_ancestor_heads(
1040 1040 &self,
1041 1041 revisions: &[Revision],
1042 1042 ) -> Result<Vec<Revision>, GraphError> {
1043 1043 // given that revisions is expected to be small, we find this shortcut
1044 1044 // potentially acceptable, especially given that `hg-cpython` could
1045 1045 // very much bypass this, constructing a vector of unique values from
1046 1046 // the outset.
1047 1047 let as_set: HashSet<Revision> = revisions.iter().copied().collect();
1048 1048 // Besides deduplicating, the C version also implements the shortcut
1049 1049 // for `NULL_REVISION`:
1050 1050 if as_set.contains(&NULL_REVISION) {
1051 1051 return Ok(vec![]);
1052 1052 }
1053 1053
1054 1054 let revisions: Vec<Revision> = as_set.into_iter().collect();
1055 1055
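// Assumption behind the thresholds below: each bit set keeps one bit per
// input revision plus a poison bit, so a `u8` covers up to 7 revisions, a
// `u64` up to 63, and larger inputs fall back to the growable bit set.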
1056 1056 if revisions.len() < 8 {
1057 1057 self.find_gca_candidates::<u8>(&revisions)
1058 1058 } else if revisions.len() < 64 {
1059 1059 self.find_gca_candidates::<u64>(&revisions)
1060 1060 } else {
1061 1061 self.find_gca_candidates::<NonStaticPoisonableBitSet>(&revisions)
1062 1062 }
1063 1063 }
1064 1064
1065 1065 pub fn ancestors(
1066 1066 &self,
1067 1067 revisions: &[Revision],
1068 1068 ) -> Result<Vec<Revision>, GraphError> {
1069 1069 self.find_deepest_revs(&self.common_ancestor_heads(revisions)?)
1070 1070 }
1071 1071
1072 1072 /// Given a disjoint set of revs, return all candidates for the
1073 1073 /// greatest common ancestor. In revset notation, this is the set
1074 1074 /// `heads(::a and ::b and ...)`
1075 1075 fn find_gca_candidates<BS: PoisonableBitSet + Clone>(
1076 1076 &self,
1077 1077 revs: &[Revision],
1078 1078 ) -> Result<Vec<Revision>, GraphError> {
1079 1079 if revs.is_empty() {
1080 1080 return Ok(vec![]);
1081 1081 }
1082 1082 let revcount = revs.len();
1083 1083 let mut candidates = vec![];
1084 1084 let max_rev = revs.iter().max().unwrap();
1085 1085
1086 1086 let mut seen = BS::vec_of_empty(revs.len(), (max_rev.0 + 1) as usize);
1087 1087
1088 1088 for (idx, rev) in revs.iter().enumerate() {
1089 1089 seen[rev.0 as usize].add(idx);
1090 1090 }
1091 1091 let mut current_rev = *max_rev;
1092 1092 // Number of revisions whose inspection in the main loop
1093 1093 // will give a result or trigger inspection of other revisions
1094 1094 let mut interesting = revcount;
1095 1095
1096 1096 // The algorithm works on a vector of bit sets, indexed by revision
1097 1097 // numbers and iterated in reverse order.
1098 1098 // An entry in this vector is poisoned if and only if the corresponding
1099 1099 // revision is a common, yet not maximal ancestor.
1100 1100
1101 1101 // The principle of the algorithm is as follows:
1102 1102 // For a revision `r`, when entering the loop, `seen[r]` is either
1103 1103 // poisoned or the subset of `revs` of which `r` is an ancestor.
1104 1104 // If this subset is full, then `r` is a solution and its parents
1105 1105 // have to be poisoned.
1106 1106 //
1107 1107 // At each iteration, the bit sets of the parents are updated by
1108 1108 // union with `seen[r]`.
1109 1109 // As we walk the index from the end, we are sure we have encountered
1110 1110 // all children of `r` before `r`, hence we know that `seen[r]` is
1111 1111 // fully computed.
1112 1112 //
1113 1113 // On top of that, several optimizations make the code less obvious to
1114 1114 // read than the description above:
1115 1115 // - The `interesting` counter allows breaking early
1116 1116 // - The loop starts from `max(revs)`
1117 1117 // - Early return in case it is detected that one of the incoming revs
1118 1118 // is a common ancestor of all of them.
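// Worked example (illustrative, not from the original comments): with two
// input revs `a` and `b`, the walk starts with `seen[a] = {a}` and
// `seen[b] = {b}`. Going down from `max(revs)`, the first revision whose
// bit set grows to the full `{a, b}` is pushed as a candidate and its
// parents are poisoned, so deeper common ancestors are not also reported.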
1119 1119 while current_rev.0 >= 0 && interesting > 0 {
1120 1120 let current_seen = seen[current_rev.0 as usize].clone();
1121 1121
1122 1122 if current_seen.is_empty() {
1123 1123 current_rev = Revision(current_rev.0 - 1);
1124 1124 continue;
1125 1125 }
1126 1126 let mut poison = current_seen.is_poisoned();
1127 1127 if !poison {
1128 1128 interesting -= 1;
1129 1129 if current_seen.is_full_range(revcount) {
1130 1130 candidates.push(current_rev);
1131 1131 poison = true;
1132 1132
1133 1133 // Being a common ancestor, if `current_rev` is among
1134 1134 // the input revisions, it is *the* answer.
1135 1135 for rev in revs {
1136 1136 if *rev == current_rev {
1137 1137 return Ok(candidates);
1138 1138 }
1139 1139 }
1140 1140 }
1141 1141 }
1142 1142 for parent in self.parents(current_rev)? {
1143 1143 if parent == NULL_REVISION {
1144 1144 continue;
1145 1145 }
1146 1146 let parent_seen = &mut seen[parent.0 as usize];
1147 1147 if poison {
1148 1148 // This block is logically equivalent to poisoning the parent
1149 1149 // and counting it as non-interesting if it had been seen
1150 1150 // before (in which case it had already been counted as interesting)
1151 1151 if !parent_seen.is_empty() && !parent_seen.is_poisoned() {
1152 1152 interesting -= 1;
1153 1153 }
1154 1154 parent_seen.poison();
1155 1155 } else {
1156 1156 if parent_seen.is_empty() {
1157 1157 interesting += 1;
1158 1158 }
1159 1159 parent_seen.union(&current_seen);
1160 1160 }
1161 1161 }
1162 1162
1163 1163 current_rev = Revision(current_rev.0 - 1);
1164 1164 }
1165 1165
1166 1166 Ok(candidates)
1167 1167 }
1168 1168
1169 1169 /// Given a disjoint set of revs, return the subset with the longest path
1170 1170 /// to the root.
1171 1171 fn find_deepest_revs(
1172 1172 &self,
1173 1173 revs: &[Revision],
1174 1174 ) -> Result<Vec<Revision>, GraphError> {
1175 1175 // TODO replace this all with just comparing rank?
1176 1176 // Also, the original implementations in C/Python are cryptic, not
1177 1177 // even sure we actually need this?
1178 1178 if revs.len() <= 1 {
1179 1179 return Ok(revs.to_owned());
1180 1180 }
1181 1181 let max_rev = revs.iter().max().unwrap().0;
1182 1182 let mut interesting = HashMap::new();
1183 1183 let mut seen = vec![0; max_rev as usize + 1];
1184 1184 let mut depth = vec![0; max_rev as usize + 1];
1185 1185 let mut mapping = vec![];
1186 1186 let mut revs = revs.to_owned();
1187 1187 revs.sort_unstable();
1188 1188
1189 1189 for (idx, rev) in revs.iter().enumerate() {
1190 1190 depth[rev.0 as usize] = 1;
1191 1191 let shift = 1 << idx;
1192 1192 seen[rev.0 as usize] = shift;
1193 1193 interesting.insert(shift, 1);
1194 1194 mapping.push((shift, *rev));
1195 1195 }
1196 1196
1197 1197 let mut current_rev = Revision(max_rev);
1198 1198 while current_rev.0 >= 0 && interesting.len() > 1 {
1199 1199 let current_depth = depth[current_rev.0 as usize];
1200 1200 if current_depth == 0 {
1201 1201 current_rev = Revision(current_rev.0 - 1);
1202 1202 continue;
1203 1203 }
1204 1204
1205 1205 let current_seen = seen[current_rev.0 as usize];
1206 1206 for parent in self.parents(current_rev)? {
1207 1207 if parent == NULL_REVISION {
1208 1208 continue;
1209 1209 }
1210 1210 let parent_seen = seen[parent.0 as usize];
1211 1211 let parent_depth = depth[parent.0 as usize];
1212 1212 if parent_depth <= current_depth {
1213 1213 depth[parent.0 as usize] = current_depth + 1;
1214 1214 if parent_seen != current_seen {
1215 1215 *interesting.get_mut(&current_seen).unwrap() += 1;
1216 1216 seen[parent.0 as usize] = current_seen;
1217 1217 if parent_seen != 0 {
1218 1218 let parent_interesting =
1219 1219 interesting.get_mut(&parent_seen).unwrap();
1220 1220 *parent_interesting -= 1;
1221 1221 if *parent_interesting == 0 {
1222 1222 interesting.remove(&parent_seen);
1223 1223 }
1224 1224 }
1225 1225 }
1226 1226 } else if current_depth == parent_depth - 1 {
1227 1227 let either_seen = parent_seen | current_seen;
1228 1228 if either_seen == parent_seen {
1229 1229 continue;
1230 1230 }
1231 1231 seen[parent.0 as usize] = either_seen;
1232 1232 interesting
1233 1233 .entry(either_seen)
1234 1234 .and_modify(|v| *v += 1)
1235 1235 .or_insert(1);
1236 1236 *interesting.get_mut(&parent_seen).unwrap() -= 1;
1237 1237 if interesting[&parent_seen] == 0 {
1238 1238 interesting.remove(&parent_seen);
1239 1239 }
1240 1240 }
1241 1241 }
1242 1242 *interesting.get_mut(&current_seen).unwrap() -= 1;
1243 1243 if interesting[&current_seen] == 0 {
1244 1244 interesting.remove(&current_seen);
1245 1245 }
1246 1246
1247 1247 current_rev = Revision(current_rev.0 - 1);
1248 1248 }
1249 1249
1250 1250 if interesting.len() != 1 {
1251 1251 return Ok(vec![]);
1252 1252 }
1253 1253 let mask = interesting.keys().next().unwrap();
1254 1254
1255 1255 Ok(mapping
1256 1256 .into_iter()
1257 1257 .filter_map(|(shift, rev)| {
1258 1258 if (mask & shift) != 0 {
1259 1259 return Some(rev);
1260 1260 }
1261 1261 None
1262 1262 })
1263 1263 .collect())
1264 1264 }
1265 1265 }
1266 1266
1267 1267 /// The kind of functionality needed by find_gca_candidates
1268 1268 ///
1269 1269 /// This is a bit mask which can be declared to be "poisoned", which callers
1270 1270 /// interpret to break out of some loops.
1271 1271 ///
1272 1272 /// The maximum capacity of the bit mask is up to the actual implementation
1273 1273 trait PoisonableBitSet: Sized + PartialEq {
1274 1274 /// Return a vector of exactly n elements, initialized to be empty.
1275 1275 ///
1276 1276 /// Optimizations can vastly depend on the implementation. Implementations
1277 1277 /// that are `Copy` and have a constant capacity typically can be very
1278 1278 /// simple.
1279 1279 fn vec_of_empty(sets_size: usize, vec_len: usize) -> Vec<Self>;
1280 1280
1281 1281 /// The size of the bit mask in memory
1282 1282 fn size(&self) -> usize;
1283 1283
1284 1284 /// The number of elements that can be represented in the set.
1285 1285 ///
1286 1286 /// Another way to put it is that it is the highest integer `C` such that
1287 1287 /// the set is guaranteed to always be a subset of the integer range
1288 1288 /// `[0, C)`
1289 1289 fn capacity(&self) -> usize;
1290 1290
1291 1291 /// Declare `n` to belong to the set
1292 1292 fn add(&mut self, n: usize);
1293 1293
1294 1294 /// Declare `n` not to belong to the set
1295 1295 fn discard(&mut self, n: usize);
1296 1296
1297 1297 /// Replace this bit set by its union with other
1298 1298 fn union(&mut self, other: &Self);
1299 1299
1300 1300 /// Poison the bit set
1301 1301 ///
1302 1302 /// Interpretation up to the caller
1303 1303 fn poison(&mut self);
1304 1304
1305 1305 /// Is the bit set poisoned?
1306 1306 ///
1307 1307 /// Interpretation is up to the caller
1308 1308 fn is_poisoned(&self) -> bool;
1309 1309
1310 1310 /// Is the bit set empty?
1311 1311 fn is_empty(&self) -> bool;
1312 1312
1313 1313 /// Return `true` if and only if the bit set is the full range `[0, n)`
1314 1314 /// of integers
1315 1315 fn is_full_range(&self, n: usize) -> bool;
1316 1316 }
1317 1317
1318 1318 const U64_POISON: u64 = 1 << 63;
1319 1319 const U8_POISON: u8 = 1 << 7;
1320 1320
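// Walkthrough (hypothetical values) of the `u8` implementation below: bits
// 0..=6 record which of the input revisions are seen, bit 7 is the poison
// marker.
#[cfg(test)]
fn _poisonable_bitset_example() {
    let mut sets = u8::vec_of_empty(7, 3);
    sets[1].add(0);
    sets[1].add(2); // sets[1] is now 0b0000_0101
    assert!(!sets[1].is_full_range(3)); // bit 1 is missing
    sets[1].poison(); // overwritten with 0b1000_0000
    assert!(sets[1].is_poisoned());
}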
1321 1321 impl PoisonableBitSet for u64 {
1322 1322 fn vec_of_empty(_sets_size: usize, vec_len: usize) -> Vec<Self> {
1323 1323 vec![0u64; vec_len]
1324 1324 }
1325 1325
1326 1326 fn size(&self) -> usize {
1327 1327 8
1328 1328 }
1329 1329
1330 1330 fn capacity(&self) -> usize {
1331 1331 63
1332 1332 }
1333 1333
1334 1334 fn add(&mut self, n: usize) {
1335 1335 (*self) |= 1u64 << n;
1336 1336 }
1337 1337
1338 1338 fn discard(&mut self, n: usize) {
1339 1339 (*self) &= u64::MAX - (1u64 << n);
1340 1340 }
1341 1341
1342 1342 fn union(&mut self, other: &Self) {
1343 1343 if *self != *other {
1344 1344 (*self) |= *other;
1345 1345 }
1346 1346 }
1347 1347
1348 1348 fn is_full_range(&self, n: usize) -> bool {
1349 1349 *self + 1 == (1u64 << n)
1350 1350 }
1351 1351
1352 1352 fn is_empty(&self) -> bool {
1353 1353 *self == 0
1354 1354 }
1355 1355
1356 1356 fn poison(&mut self) {
1357 1357 *self = U64_POISON;
1358 1358 }
1359 1359
1360 1360 fn is_poisoned(&self) -> bool {
1361 1361 // equality comparison would be tempting but would not resist
1362 1362 // operations after poisoning (even if these should be bogus).
1363 1363 *self >= U64_POISON
1364 1364 }
1365 1365 }
1366 1366
1367 1367 impl PoisonableBitSet for u8 {
1368 1368 fn vec_of_empty(_sets_size: usize, vec_len: usize) -> Vec<Self> {
1369 1369 vec![0; vec_len]
1370 1370 }
1371 1371
1372 1372 fn size(&self) -> usize {
1373 1373 1
1374 1374 }
1375 1375
1376 1376 fn capacity(&self) -> usize {
1377 1377 7
1378 1378 }
1379 1379
1380 1380 fn add(&mut self, n: usize) {
1381 1381 (*self) |= 1 << n;
1382 1382 }
1383 1383
1384 1384 fn discard(&mut self, n: usize) {
1385 1385 (*self) &= u8::MAX - (1 << n);
1386 1386 }
1387 1387
1388 1388 fn union(&mut self, other: &Self) {
1389 1389 if *self != *other {
1390 1390 (*self) |= *other;
1391 1391 }
1392 1392 }
1393 1393
1394 1394 fn is_full_range(&self, n: usize) -> bool {
1395 1395 *self + 1 == (1 << n)
1396 1396 }
1397 1397
1398 1398 fn is_empty(&self) -> bool {
1399 1399 *self == 0
1400 1400 }
1401 1401
1402 1402 fn poison(&mut self) {
1403 1403 *self = U8_POISON;
1404 1404 }
1405 1405
1406 1406 fn is_poisoned(&self) -> bool {
1407 1407 // equality comparison would be tempting but would not resist
1408 1408 // operations after poisoning (even if these should be bogus).
1409 1409 *self >= U8_POISON
1410 1410 }
1411 1411 }
1412 1412
1413 1413 /// A poisonable bit set whose capacity is not known at compile time but
1414 1414 /// is constant after initial construction
1415 1415 ///
1416 1416 /// This can be way further optimized if performance assessments (speed
1417 1417 /// and/or RAM) require it.
1418 1418 /// As far as RAM is concerned, for large vectors of these, the main problem
1419 1419 /// would be the repetition of set_size in each item. We would need a trait
1420 1420 /// to abstract over the idea of a vector of such bit sets to do better.
1421 1421 #[derive(Clone, PartialEq)]
1422 1422 struct NonStaticPoisonableBitSet {
1423 1423 set_size: usize,
1424 1424 bit_set: Vec<u64>,
1425 1425 }
1426 1426
1427 1427 /// Number of `u64` needed for a [`NonStaticPoisonableBitSet`] of given size
1428 1428 fn non_static_poisonable_inner_len(set_size: usize) -> usize {
1429 1429 1 + (set_size + 1) / 64
1430 1430 }
1431 1431
1432 1432 impl NonStaticPoisonableBitSet {
1433 1433 /// The index of the sub-bit set for the given n, and the index inside
1434 1434 /// the latter
1435 1435 fn index(&self, n: usize) -> (usize, usize) {
1436 1436 (n / 64, n % 64)
1437 1437 }
1438 1438 }
1439 1439
1440 1440 /// Mock implementation to ensure that the trait makes sense
1441 1441 impl PoisonableBitSet for NonStaticPoisonableBitSet {
1442 1442 fn vec_of_empty(set_size: usize, vec_len: usize) -> Vec<Self> {
1443 1443 let tmpl = Self {
1444 1444 set_size,
1445 1445 bit_set: vec![0u64; non_static_poisonable_inner_len(set_size)],
1446 1446 };
1447 1447 vec![tmpl; vec_len]
1448 1448 }
1449 1449
1450 1450 fn size(&self) -> usize {
1451 1451 8 + self.bit_set.len() * 8
1452 1452 }
1453 1453
1454 1454 fn capacity(&self) -> usize {
1455 1455 self.set_size
1456 1456 }
1457 1457
1458 1458 fn add(&mut self, n: usize) {
1459 1459 let (sub_bs, bit_pos) = self.index(n);
1460 1460 self.bit_set[sub_bs] |= 1 << bit_pos
1461 1461 }
1462 1462
1463 1463 fn discard(&mut self, n: usize) {
1464 1464 let (sub_bs, bit_pos) = self.index(n);
1465 1465 self.bit_set[sub_bs] &= u64::MAX - (1 << bit_pos)
1466 1466 }
1467 1467
1468 1468 fn union(&mut self, other: &Self) {
1469 1469 assert!(
1470 1470 self.set_size == other.set_size,
1471 1471 "Binary operations on bit sets can only be done on same size"
1472 1472 );
1473 1473 for i in 0..self.bit_set.len() - 1 {
1474 1474 self.bit_set[i] |= other.bit_set[i]
1475 1475 }
1476 1476 }
1477 1477
1478 1478 fn is_full_range(&self, n: usize) -> bool {
1479 1479 let (sub_bs, bit_pos) = self.index(n);
1480 1480 self.bit_set[..sub_bs].iter().all(|bs| *bs == u64::MAX)
1481 1481 && self.bit_set[sub_bs] == (1 << bit_pos) - 1
1482 1482 }
1483 1483
1484 1484 fn is_empty(&self) -> bool {
1485 1485 self.bit_set.iter().all(|bs| *bs == 0u64)
1486 1486 }
1487 1487
1488 1488 fn poison(&mut self) {
1489 1489 let (sub_bs, bit_pos) = self.index(self.set_size);
1490 1490 self.bit_set[sub_bs] = 1 << bit_pos;
1491 1491 }
1492 1492
1493 1493 fn is_poisoned(&self) -> bool {
1494 1494 let (sub_bs, bit_pos) = self.index(self.set_size);
1495 1495 self.bit_set[sub_bs] >= 1 << bit_pos
1496 1496 }
1497 1497 }
1498 1498
1499 1499 /// Set of roots of all non-public phases
1500 1500 pub type RootsPerPhase = [HashSet<Revision>; Phase::non_public_phases().len()];
1501 1501
1502 1502 #[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash)]
1503 1503 pub enum Phase {
1504 1504 Public = 0,
1505 1505 Draft = 1,
1506 1506 Secret = 2,
1507 1507 Archived = 3,
1508 1508 Internal = 4,
1509 1509 }
1510 1510
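// Note (added for clarity): the integer values accepted below mirror
// Mercurial's on-disk/wire phase numbers, where archived is 32 and
// internal is 96; they intentionally differ from the enum discriminants
// above, which are used as array indices.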
1511 1511 impl TryFrom<usize> for Phase {
1512 1512 type Error = RevlogError;
1513 1513
1514 1514 fn try_from(value: usize) -> Result<Self, Self::Error> {
1515 1515 Ok(match value {
1516 1516 0 => Self::Public,
1517 1517 1 => Self::Draft,
1518 1518 2 => Self::Secret,
1519 1519 32 => Self::Archived,
1520 1520 96 => Self::Internal,
1521 1521 v => {
1522 1522 return Err(RevlogError::corrupted(format!(
1523 1523 "invalid phase value {}",
1524 1524 v
1525 1525 )))
1526 1526 }
1527 1527 })
1528 1528 }
1529 1529 }
1530 1530
1531 1531 impl Phase {
1532 1532 pub const fn all_phases() -> &'static [Self] {
1533 1533 &[
1534 1534 Self::Public,
1535 1535 Self::Draft,
1536 1536 Self::Secret,
1537 1537 Self::Archived,
1538 1538 Self::Internal,
1539 1539 ]
1540 1540 }
1541 1541 pub const fn non_public_phases() -> &'static [Self] {
1542 1542 &[Self::Draft, Self::Secret, Self::Archived, Self::Internal]
1543 1543 }
1544 1544 }
1545 1545
1546 1546 fn inline_scan(bytes: &[u8]) -> (usize, Vec<usize>) {
1547 1547 let mut offset: usize = 0;
1548 1548 let mut offsets = Vec::new();
1549 1549
1550 1550 while offset + INDEX_ENTRY_SIZE <= bytes.len() {
1551 1551 offsets.push(offset);
1552 1552 let end = offset + INDEX_ENTRY_SIZE;
1553 1553 let entry = IndexEntry {
1554 1554 bytes: &bytes[offset..end],
1555 1555 offset_override: None,
1556 1556 };
1557 1557
1558 1558 offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
1559 1559 }
1560 1560 (offset, offsets)
1561 1561 }
1562 1562
1563 1563 impl super::RevlogIndex for Index {
1564 1564 fn len(&self) -> usize {
1565 1565 self.len()
1566 1566 }
1567 1567
1568 1568 fn node(&self, rev: Revision) -> Option<&Node> {
1569 1569 if rev == NULL_REVISION {
1570 1570 return Some(&NULL_NODE);
1571 1571 }
1572 1572 self.get_entry(rev).map(|entry| entry.hash())
1573 1573 }
1574 1574 }
1575 1575
1576 1576 #[derive(Debug)]
1577 1577 pub struct IndexEntry<'a> {
1578 1578 bytes: &'a [u8],
1579 1579 /// Allows overriding the offset value of the entry.
1580 1580 ///
1581 1581 /// For interleaved index and data, the offset stored in the index
1582 1582 /// corresponds to the separated data offset.
1583 1583 /// It has to be overridden with the actual offset in the interleaved
1584 1584 /// index which is just after the index block.
1585 1585 ///
1586 1586 /// For separated index and data, the offset stored in the first index
1587 1587 /// entry is mixed with the index headers.
1588 1588 /// It has to be overridden with 0.
1589 1589 offset_override: Option<usize>,
1590 1590 }
1591 1591
1592 1592 impl<'a> IndexEntry<'a> {
1593 1593 /// Return the offset of the data.
1594 1594 pub fn offset(&self) -> usize {
1595 1595 if let Some(offset_override) = self.offset_override {
1596 1596 offset_override
1597 1597 } else {
1598 1598 let mut bytes = [0; 8];
1599 1599 bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
1600 1600 BigEndian::read_u64(&bytes[..]) as usize
1601 1601 }
1602 1602 }
1603 1603 pub fn raw_offset(&self) -> u64 {
1604 1604 BigEndian::read_u64(&self.bytes[0..8])
1605 1605 }
1606 1606
1607 1607 /// Same result (except potentially for rev 0) as C `index_get_start()`
1608 1608 fn c_start(&self) -> u64 {
1609 1609 self.raw_offset() >> 16
1610 1610 }
1611 1611
1612 1612 pub fn flags(&self) -> u16 {
1613 1613 BigEndian::read_u16(&self.bytes[6..=7])
1614 1614 }
1615 1615
1616 1616 /// Return the compressed length of the data.
1617 1617 pub fn compressed_len(&self) -> u32 {
1618 1618 BigEndian::read_u32(&self.bytes[8..=11])
1619 1619 }
1620 1620
1621 1621 /// Return the uncompressed length of the data.
1622 1622 pub fn uncompressed_len(&self) -> i32 {
1623 1623 BigEndian::read_i32(&self.bytes[12..=15])
1624 1624 }
1625 1625
1626 1626 /// Return the revision upon which the data has been derived.
1627 1627 pub fn base_revision_or_base_of_delta_chain(&self) -> UncheckedRevision {
1628 1628 // TODO Maybe return an Option when base_revision == rev?
1629 1629 // Requires to add rev to IndexEntry
1630 1630
1631 1631 BigEndian::read_i32(&self.bytes[16..]).into()
1632 1632 }
1633 1633
1634 1634 pub fn link_revision(&self) -> UncheckedRevision {
1635 1635 BigEndian::read_i32(&self.bytes[20..]).into()
1636 1636 }
1637 1637
1638 1638 pub fn p1(&self) -> UncheckedRevision {
1639 1639 BigEndian::read_i32(&self.bytes[24..]).into()
1640 1640 }
1641 1641
1642 1642 pub fn p2(&self) -> UncheckedRevision {
1643 1643 BigEndian::read_i32(&self.bytes[28..]).into()
1644 1644 }
1645 1645
1646 1646 /// Return the hash of revision's full text.
1647 1647 ///
1648 1648 /// Currently, SHA-1 is used and only the first 20 bytes of this field
1649 1649 /// are used.
1650 1650 pub fn hash(&self) -> &'a Node {
1651 1651 (&self.bytes[32..52]).try_into().unwrap()
1652 1652 }
1653 1653
1654 1654 pub fn as_bytes(&self) -> &'a [u8] {
1655 1655 self.bytes
1656 1656 }
1657 1657 }
1658 1658
1659 1659 #[cfg(test)]
1660 1660 mod tests {
1661 1661 use super::*;
1662 1662 use crate::node::NULL_NODE;
1663 1663
1664 1664 #[cfg(test)]
1665 1665 #[derive(Debug, Copy, Clone)]
1666 1666 pub struct IndexEntryBuilder {
1667 1667 is_first: bool,
1668 1668 is_inline: bool,
1669 1669 is_general_delta: bool,
1670 1670 version: u16,
1671 1671 offset: usize,
1672 1672 compressed_len: usize,
1673 1673 uncompressed_len: usize,
1674 1674 base_revision_or_base_of_delta_chain: Revision,
1675 1675 link_revision: Revision,
1676 1676 p1: Revision,
1677 1677 p2: Revision,
1678 1678 node: Node,
1679 1679 }
1680 1680
1681 1681 #[cfg(test)]
1682 1682 impl IndexEntryBuilder {
1683 1683 #[allow(clippy::new_without_default)]
1684 1684 pub fn new() -> Self {
1685 1685 Self {
1686 1686 is_first: false,
1687 1687 is_inline: false,
1688 1688 is_general_delta: true,
1689 1689 version: 1,
1690 1690 offset: 0,
1691 1691 compressed_len: 0,
1692 1692 uncompressed_len: 0,
1693 1693 base_revision_or_base_of_delta_chain: Revision(0),
1694 1694 link_revision: Revision(0),
1695 1695 p1: NULL_REVISION,
1696 1696 p2: NULL_REVISION,
1697 1697 node: NULL_NODE,
1698 1698 }
1699 1699 }
1700 1700
1701 1701 pub fn is_first(&mut self, value: bool) -> &mut Self {
1702 1702 self.is_first = value;
1703 1703 self
1704 1704 }
1705 1705
1706 1706 pub fn with_inline(&mut self, value: bool) -> &mut Self {
1707 1707 self.is_inline = value;
1708 1708 self
1709 1709 }
1710 1710
1711 1711 pub fn with_general_delta(&mut self, value: bool) -> &mut Self {
1712 1712 self.is_general_delta = value;
1713 1713 self
1714 1714 }
1715 1715
1716 1716 pub fn with_version(&mut self, value: u16) -> &mut Self {
1717 1717 self.version = value;
1718 1718 self
1719 1719 }
1720 1720
1721 1721 pub fn with_offset(&mut self, value: usize) -> &mut Self {
1722 1722 self.offset = value;
1723 1723 self
1724 1724 }
1725 1725
1726 1726 pub fn with_compressed_len(&mut self, value: usize) -> &mut Self {
1727 1727 self.compressed_len = value;
1728 1728 self
1729 1729 }
1730 1730
1731 1731 pub fn with_uncompressed_len(&mut self, value: usize) -> &mut Self {
1732 1732 self.uncompressed_len = value;
1733 1733 self
1734 1734 }
1735 1735
1736 1736 pub fn with_base_revision_or_base_of_delta_chain(
1737 1737 &mut self,
1738 1738 value: Revision,
1739 1739 ) -> &mut Self {
1740 1740 self.base_revision_or_base_of_delta_chain = value;
1741 1741 self
1742 1742 }
1743 1743
1744 1744 pub fn with_link_revision(&mut self, value: Revision) -> &mut Self {
1745 1745 self.link_revision = value;
1746 1746 self
1747 1747 }
1748 1748
1749 1749 pub fn with_p1(&mut self, value: Revision) -> &mut Self {
1750 1750 self.p1 = value;
1751 1751 self
1752 1752 }
1753 1753
1754 1754 pub fn with_p2(&mut self, value: Revision) -> &mut Self {
1755 1755 self.p2 = value;
1756 1756 self
1757 1757 }
1758 1758
1759 1759 pub fn with_node(&mut self, value: Node) -> &mut Self {
1760 1760 self.node = value;
1761 1761 self
1762 1762 }
1763 1763
1764 1764 pub fn build(&self) -> Vec<u8> {
1765 1765 let mut bytes = Vec::with_capacity(INDEX_ENTRY_SIZE);
1766 1766 if self.is_first {
1767 1767 bytes.extend(&match (self.is_general_delta, self.is_inline) {
1768 1768 (false, false) => [0u8, 0],
1769 1769 (false, true) => [0u8, 1],
1770 1770 (true, false) => [0u8, 2],
1771 1771 (true, true) => [0u8, 3],
1772 1772 });
1773 1773 bytes.extend(&self.version.to_be_bytes());
1774 1774 // Remaining offset bytes.
1775 1775 bytes.extend(&[0u8; 2]);
1776 1776 } else {
1777 1777 // Offset stored on 48 bits (6 bytes)
1778 1778 bytes.extend(&(self.offset as u64).to_be_bytes()[2..]);
1779 1779 }
1780 1780 bytes.extend(&[0u8; 2]); // Revision flags.
1781 1781 bytes.extend(&(self.compressed_len as u32).to_be_bytes());
1782 1782 bytes.extend(&(self.uncompressed_len as u32).to_be_bytes());
1783 1783 bytes.extend(
1784 1784 &self.base_revision_or_base_of_delta_chain.0.to_be_bytes(),
1785 1785 );
1786 1786 bytes.extend(&self.link_revision.0.to_be_bytes());
1787 1787 bytes.extend(&self.p1.0.to_be_bytes());
1788 1788 bytes.extend(&self.p2.0.to_be_bytes());
1789 1789 bytes.extend(self.node.as_bytes());
1790 1790 bytes.extend(vec![0u8; 12]);
1791 1791 bytes
1792 1792 }
1793 1793 }
1794 1794
1795 1795 pub fn is_inline(index_bytes: &[u8]) -> bool {
1796 1796 IndexHeader::parse(index_bytes)
1797 1797 .expect("too short")
1798 1798 .unwrap()
1799 1799 .format_flags()
1800 1800 .is_inline()
1801 1801 }
1802 1802
1803 1803 pub fn uses_generaldelta(index_bytes: &[u8]) -> bool {
1804 1804 IndexHeader::parse(index_bytes)
1805 1805 .expect("too short")
1806 1806 .unwrap()
1807 1807 .format_flags()
1808 1808 .uses_generaldelta()
1809 1809 }
1810 1810
1811 1811 pub fn get_version(index_bytes: &[u8]) -> u16 {
1812 1812 IndexHeader::parse(index_bytes)
1813 1813 .expect("too short")
1814 1814 .unwrap()
1815 1815 .format_version()
1816 1816 }
1817 1817
1818 1818 #[test]
1819 1819 fn flags_when_no_inline_flag_test() {
1820 1820 let bytes = IndexEntryBuilder::new()
1821 1821 .is_first(true)
1822 1822 .with_general_delta(false)
1823 1823 .with_inline(false)
1824 1824 .build();
1825 1825
1826 1826 assert!(!is_inline(&bytes));
1827 1827 assert!(!uses_generaldelta(&bytes));
1828 1828 }
1829 1829
1830 1830 #[test]
1831 1831 fn flags_when_inline_flag_test() {
1832 1832 let bytes = IndexEntryBuilder::new()
1833 1833 .is_first(true)
1834 1834 .with_general_delta(false)
1835 1835 .with_inline(true)
1836 1836 .build();
1837 1837
1838 1838 assert!(is_inline(&bytes));
1839 1839 assert!(!uses_generaldelta(&bytes));
1840 1840 }
1841 1841
1842 1842 #[test]
1843 1843 fn flags_when_inline_and_generaldelta_flags_test() {
1844 1844 let bytes = IndexEntryBuilder::new()
1845 1845 .is_first(true)
1846 1846 .with_general_delta(true)
1847 1847 .with_inline(true)
1848 1848 .build();
1849 1849
1850 1850 assert!(is_inline(&bytes));
1851 1851 assert!(uses_generaldelta(&bytes));
1852 1852 }
1853 1853
1854 1854 #[test]
1855 1855 fn test_offset() {
1856 1856 let bytes = IndexEntryBuilder::new().with_offset(1).build();
1857 1857 let entry = IndexEntry {
1858 1858 bytes: &bytes,
1859 1859 offset_override: None,
1860 1860 };
1861 1861
1862 1862 assert_eq!(entry.offset(), 1)
1863 1863 }
1864 1864
1865 1865 #[test]
1866 1866 fn test_with_overridden_offset() {
1867 1867 let bytes = IndexEntryBuilder::new().with_offset(1).build();
1868 1868 let entry = IndexEntry {
1869 1869 bytes: &bytes,
1870 1870 offset_override: Some(2),
1871 1871 };
1872 1872
1873 1873 assert_eq!(entry.offset(), 2)
1874 1874 }
1875 1875
1876 1876 #[test]
1877 1877 fn test_compressed_len() {
1878 1878 let bytes = IndexEntryBuilder::new().with_compressed_len(1).build();
1879 1879 let entry = IndexEntry {
1880 1880 bytes: &bytes,
1881 1881 offset_override: None,
1882 1882 };
1883 1883
1884 1884 assert_eq!(entry.compressed_len(), 1)
1885 1885 }
1886 1886
1887 1887 #[test]
1888 1888 fn test_uncompressed_len() {
1889 1889 let bytes = IndexEntryBuilder::new().with_uncompressed_len(1).build();
1890 1890 let entry = IndexEntry {
1891 1891 bytes: &bytes,
1892 1892 offset_override: None,
1893 1893 };
1894 1894
1895 1895 assert_eq!(entry.uncompressed_len(), 1)
1896 1896 }
1897 1897
1898 1898 #[test]
1899 1899 fn test_base_revision_or_base_of_delta_chain() {
1900 1900 let bytes = IndexEntryBuilder::new()
1901 1901 .with_base_revision_or_base_of_delta_chain(Revision(1))
1902 1902 .build();
1903 1903 let entry = IndexEntry {
1904 1904 bytes: &bytes,
1905 1905 offset_override: None,
1906 1906 };
1907 1907
1908 1908 assert_eq!(entry.base_revision_or_base_of_delta_chain(), 1.into())
1909 1909 }
1910 1910
1911 1911 #[test]
1912 1912 fn link_revision_test() {
1913 1913 let bytes = IndexEntryBuilder::new()
1914 1914 .with_link_revision(Revision(123))
1915 1915 .build();
1916 1916
1917 1917 let entry = IndexEntry {
1918 1918 bytes: &bytes,
1919 1919 offset_override: None,
1920 1920 };
1921 1921
1922 1922 assert_eq!(entry.link_revision(), 123.into());
1923 1923 }
1924 1924
1925 1925 #[test]
1926 1926 fn p1_test() {
1927 1927 let bytes = IndexEntryBuilder::new().with_p1(Revision(123)).build();
1928 1928
1929 1929 let entry = IndexEntry {
1930 1930 bytes: &bytes,
1931 1931 offset_override: None,
1932 1932 };
1933 1933
1934 1934 assert_eq!(entry.p1(), 123.into());
1935 1935 }
1936 1936
1937 1937 #[test]
1938 1938 fn p2_test() {
1939 1939 let bytes = IndexEntryBuilder::new().with_p2(Revision(123)).build();
1940 1940
1941 1941 let entry = IndexEntry {
1942 1942 bytes: &bytes,
1943 1943 offset_override: None,
1944 1944 };
1945 1945
1946 1946 assert_eq!(entry.p2(), 123.into());
1947 1947 }
1948 1948
1949 1949 #[test]
1950 1950 fn node_test() {
1951 1951 let node = Node::from_hex("0123456789012345678901234567890123456789")
1952 1952 .unwrap();
1953 1953 let bytes = IndexEntryBuilder::new().with_node(node).build();
1954 1954
1955 1955 let entry = IndexEntry {
1956 1956 bytes: &bytes,
1957 1957 offset_override: None,
1958 1958 };
1959 1959
1960 1960 assert_eq!(*entry.hash(), node);
1961 1961 }
1962 1962
1963 1963 #[test]
1964 1964 fn version_test() {
1965 1965 let bytes = IndexEntryBuilder::new()
1966 1966 .is_first(true)
1967 1967 .with_version(2)
1968 1968 .build();
1969 1969
1970 1970 assert_eq!(get_version(&bytes), 2)
1971 1971 }
1972 1972 }
1973 1973
1974 1974 #[cfg(test)]
1975 1975 pub use tests::IndexEntryBuilder;
@@ -1,1158 +1,1158 b''
1 1 // revlog.rs
2 2 //
3 3 // Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
4 4 //
5 5 // This software may be used and distributed according to the terms of the
6 6 // GNU General Public License version 2 or any later version.
7 7
8 8 use crate::{
9 9 cindex,
10 10 conversion::{rev_pyiter_collect, rev_pyiter_collect_or_else},
11 11 utils::{node_from_py_bytes, node_from_py_object},
12 12 PyRevision,
13 13 };
14 14 use cpython::{
15 15 buffer::{Element, PyBuffer},
16 16 exc::{IndexError, ValueError},
17 17 ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyInt, PyList,
18 18 PyModule, PyObject, PyResult, PySet, PyString, PyTuple, Python,
19 19 PythonObject, ToPyObject,
20 20 };
21 21 use hg::{
22 22 errors::HgError,
23 23 index::{
24 24 IndexHeader, Phase, RevisionDataParams, SnapshotsCache,
25 25 INDEX_ENTRY_SIZE,
26 26 },
27 27 nodemap::{Block, NodeMapError, NodeTree},
28 28 revlog::{nodemap::NodeMap, NodePrefix, RevlogError, RevlogIndex},
29 29 BaseRevision, Revision, UncheckedRevision, NULL_REVISION,
30 30 };
31 31 use std::{cell::RefCell, collections::HashMap};
32 32
33 33 /// Return a Struct implementing the Graph trait
34 34 pub(crate) fn pyindex_to_graph(
35 35 py: Python,
36 36 index: PyObject,
37 37 ) -> PyResult<cindex::Index> {
38 38 match index.extract::<MixedIndex>(py) {
39 39 Ok(midx) => Ok(midx.clone_cindex(py)),
40 40 Err(_) => cindex::Index::new(py, index),
41 41 }
42 42 }
43 43
44 44 py_class!(pub class MixedIndex |py| {
45 45 data cindex: RefCell<cindex::Index>;
46 46 data index: RefCell<hg::index::Index>;
47 47 data nt: RefCell<Option<NodeTree>>;
48 48 data docket: RefCell<Option<PyObject>>;
49 49 // Holds a reference to the mmap'ed persistent nodemap data
50 50 data nodemap_mmap: RefCell<Option<PyBuffer>>;
51 51 // Holds a reference to the mmap'ed persistent index data
52 52 data index_mmap: RefCell<Option<PyBuffer>>;
53 53
54 54 def __new__(
55 55 _cls,
56 56 cindex: PyObject,
57 57 data: PyObject,
58 58 default_header: u32,
59 59 ) -> PyResult<MixedIndex> {
60 60 Self::new(py, cindex, data, default_header)
61 61 }
62 62
63 63 /// Compatibility layer used for Python consumers needing access to the C index
64 64 ///
65 65 /// Only use case so far is `scmutil.shortesthexnodeidprefix`,
66 66 /// that may need to build a custom `nodetree`, based on a specified revset.
67 67 /// With a Rust implementation of the nodemap, we will be able to get rid of
68 68 /// this, by exposing our own standalone nodemap class,
69 69 /// ready to accept `MixedIndex`.
70 70 def get_cindex(&self) -> PyResult<PyObject> {
71 71 Ok(self.cindex(py).borrow().inner().clone_ref(py))
72 72 }
73 73
74 74 // Index API involving nodemap, as defined in mercurial/pure/parsers.py
75 75
76 76 /// Return Revision if found, raises a bare `error.RevlogError`
77 77 /// in case of ambiguity, same as C version does
78 78 def get_rev(&self, node: PyBytes) -> PyResult<Option<PyRevision>> {
79 79 let opt = self.get_nodetree(py)?.borrow();
80 80 let nt = opt.as_ref().unwrap();
81 81 let idx = &*self.cindex(py).borrow();
82 82 let ridx = &*self.index(py).borrow();
83 83 let node = node_from_py_bytes(py, &node)?;
84 84 let rust_rev =
85 85 nt.find_bin(ridx, node.into()).map_err(|e| nodemap_error(py, e))?;
86 86 let c_rev =
87 87 nt.find_bin(idx, node.into()).map_err(|e| nodemap_error(py, e))?;
88 88 assert_eq!(rust_rev, c_rev);
89 89 Ok(rust_rev.map(Into::into))
90 90
91 91 }
92 92
93 93 /// same as `get_rev()` but raises a bare `error.RevlogError` if node
94 94 /// is not found.
95 95 ///
96 96 /// No need to repeat `node` in the exception, `mercurial/revlog.py`
97 97 /// will catch and rewrap with it
98 98 def rev(&self, node: PyBytes) -> PyResult<PyRevision> {
99 99 self.get_rev(py, node)?.ok_or_else(|| revlog_error(py))
100 100 }
101 101
102 102 /// return True if the node exist in the index
103 103 def has_node(&self, node: PyBytes) -> PyResult<bool> {
104 104 // TODO OPTIM we could avoid a needless conversion here,
105 105 // to do when scaffolding for pure Rust switch is removed,
106 106 // as `get_rev()` currently does the necessary assertions
107 107 self.get_rev(py, node).map(|opt| opt.is_some())
108 108 }
109 109
110 110 /// find length of shortest hex nodeid of a binary ID
111 111 def shortest(&self, node: PyBytes) -> PyResult<usize> {
112 112 let opt = self.get_nodetree(py)?.borrow();
113 113 let nt = opt.as_ref().unwrap();
114 114 let idx = &*self.index(py).borrow();
115 115 match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
116 116 {
117 117 Ok(Some(l)) => Ok(l),
118 118 Ok(None) => Err(revlog_error(py)),
119 119 Err(e) => Err(nodemap_error(py, e)),
120 120 }
121 121 }
122 122
123 123 def partialmatch(&self, node: PyObject) -> PyResult<Option<PyBytes>> {
124 124 let opt = self.get_nodetree(py)?.borrow();
125 125 let nt = opt.as_ref().unwrap();
126 126 let idx = &*self.index(py).borrow();
127 127
128 128 let node_as_string = if cfg!(feature = "python3-sys") {
129 129 node.cast_as::<PyString>(py)?.to_string(py)?.to_string()
130 130 }
131 131 else {
132 132 let node = node.extract::<PyBytes>(py)?;
133 133 String::from_utf8_lossy(node.data(py)).to_string()
134 134 };
135 135
136 136 let prefix = NodePrefix::from_hex(&node_as_string)
137 137 .map_err(|_| PyErr::new::<ValueError, _>(
138 138 py, format!("Invalid node or prefix '{}'", node_as_string))
139 139 )?;
140 140
141 141 nt.find_bin(idx, prefix)
142 142 // TODO make an inner API returning the node directly
143 143 .map(|opt| opt.map(
144 144 |rev| PyBytes::new(py, idx.node(rev).unwrap().as_bytes())))
145 145 .map_err(|e| nodemap_error(py, e))
146 146
147 147 }
148 148
149 149 /// append an index entry
150 150 def append(&self, tup: PyTuple) -> PyResult<PyObject> {
151 151 if tup.len(py) < 8 {
152 152 // this is better than the panic promised by tup.get_item()
153 153 return Err(
154 154 PyErr::new::<IndexError, _>(py, "tuple index out of range"))
155 155 }
156 156 let node_bytes = tup.get_item(py, 7).extract(py)?;
157 157 let node = node_from_py_object(py, &node_bytes)?;
158 158
159 159 let rev = self.len(py)? as BaseRevision;
160 160 let mut idx = self.cindex(py).borrow_mut();
161 161
162 162 // This is ok since we will just add the revision to the index
163 163 let rev = Revision(rev);
164 164 idx.append(py, tup.clone_ref(py))?;
165 165 self.index(py)
166 166 .borrow_mut()
167 167 .append(py_tuple_to_revision_data_params(py, tup)?)
168 168 .unwrap();
169 169 self.get_nodetree(py)?.borrow_mut().as_mut().unwrap()
170 170 .insert(&*idx, &node, rev)
171 171 .map_err(|e| nodemap_error(py, e))?;
172 172 Ok(py.None())
173 173 }
174 174
175 175 def __delitem__(&self, key: PyObject) -> PyResult<()> {
176 176 // __delitem__ is both for `del idx[r]` and `del idx[r1:r2]`
177 177 self.cindex(py).borrow().inner().del_item(py, &key)?;
178 178 let start = key.getattr(py, "start")?;
179 179 let start = UncheckedRevision(start.extract(py)?);
180 180 let start = self.index(py)
181 181 .borrow()
182 182 .check_revision(start)
183 183 .ok_or_else(|| {
184 184 nodemap_error(py, NodeMapError::RevisionNotInIndex(start))
185 185 })?;
186 186 self.index(py).borrow_mut().remove(start).unwrap();
187 187 let mut opt = self.get_nodetree(py)?.borrow_mut();
188 188 let nt = opt.as_mut().unwrap();
189 189 nt.invalidate_all();
190 190 self.fill_nodemap(py, nt)?;
191 191 Ok(())
192 192 }
193 193
194 194 //
195 195 // Reforwarded C index API
196 196 //
197 197
198 198 // index_methods (tp_methods). Same ordering as in revlog.c
199 199
200 200 /// return the gca set of the given revs
201 201 def ancestors(&self, *args, **kw) -> PyResult<PyObject> {
202 202 let rust_res = self.inner_ancestors(py, args)?;
203 203
204 204 let c_res = self.call_cindex(py, "ancestors", args, kw)?;
205 205 // the algorithm should always provide the results in reverse ordering
206 206 assert_py_eq(py, "ancestors", &rust_res, &c_res)?;
207 207
208 208 Ok(rust_res)
209 209 }
210 210
211 211 /// return the heads of the common ancestors of the given revs
212 212 def commonancestorsheads(&self, *args, **kw) -> PyResult<PyObject> {
213 213 let rust_res = self.inner_commonancestorsheads(py, args)?;
214 214
215 215 let c_res = self.call_cindex(py, "commonancestorsheads", args, kw)?;
216 216 // the algorithm should always provide the results in reverse ordering
217 217 assert_py_eq(py, "commonancestorsheads", &rust_res, &c_res)?;
218 218
219 219 Ok(rust_res)
220 220 }
221 221
222 222 /// Clear the index caches and inner py_class data.
223 223 /// It is Python's responsibility to call `update_nodemap_data` again.
224 224 def clearcaches(&self, *args, **kw) -> PyResult<PyObject> {
225 225 self.nt(py).borrow_mut().take();
226 226 self.docket(py).borrow_mut().take();
227 227 self.nodemap_mmap(py).borrow_mut().take();
228 228 self.index(py).borrow_mut().clear_caches();
229 229 self.call_cindex(py, "clearcaches", args, kw)
230 230 }
231 231
232 232 /// return the raw binary string representing a revision
233 233 def entry_binary(&self, *args, **kw) -> PyResult<PyObject> {
234 234 let rindex = self.index(py).borrow();
235 235 let rev = UncheckedRevision(args.get_item(py, 0).extract(py)?);
236 236 let rust_bytes = rindex.check_revision(rev).and_then(
237 237 |r| rindex.entry_binary(r))
238 238 .ok_or_else(|| rev_not_in_index(py, rev))?;
239 239 let rust_res = PyBytes::new(py, rust_bytes).into_object();
240 240
241 241 let c_res = self.call_cindex(py, "entry_binary", args, kw)?;
242 242 assert_py_eq(py, "entry_binary", &rust_res, &c_res)?;
243 243 Ok(rust_res)
244 244 }
245 245
246 246 /// return a binary packed version of the header
247 247 def pack_header(&self, *args, **kw) -> PyResult<PyObject> {
248 248 let rindex = self.index(py).borrow();
249 249 let packed = rindex.pack_header(args.get_item(py, 0).extract(py)?);
250 250 let rust_res = PyBytes::new(py, &packed).into_object();
251 251
252 252 let c_res = self.call_cindex(py, "pack_header", args, kw)?;
253 253 assert_py_eq(py, "pack_header", &rust_res, &c_res)?;
254 254 Ok(rust_res)
255 255 }
256 256
257 257 /// compute phases
258 258 def computephasesmapsets(&self, *args, **kw) -> PyResult<PyObject> {
259 259 let py_roots = args.get_item(py, 0).extract::<PyDict>(py)?;
260 260 let rust_res = self.inner_computephasesmapsets(py, py_roots)?;
261 261
262 262 let c_res = self.call_cindex(py, "computephasesmapsets", args, kw)?;
263 263 assert_py_eq(py, "computephasesmapsets", &rust_res, &c_res)?;
264 264 Ok(rust_res)
265 265 }
266 266
267 267 /// reachableroots
268 268 def reachableroots2(&self, *args, **kw) -> PyResult<PyObject> {
269 269 let rust_res = self.inner_reachableroots2(
270 270 py,
271 271 UncheckedRevision(args.get_item(py, 0).extract(py)?),
272 272 args.get_item(py, 1),
273 273 args.get_item(py, 2),
274 274 args.get_item(py, 3).extract(py)?,
275 275 )?;
276 276
277 277 let c_res = self.call_cindex(py, "reachableroots2", args, kw)?;
278 278 // ordering of C result depends on how the computation went, and
279 279 // Rust result ordering is arbitrary. Hence we compare after
280 280 // sorting the results (in Python to avoid reconverting everything
281 281 // back to Rust structs).
282 282 assert_py_eq_normalized(py, "reachableroots2", &rust_res, &c_res,
283 283 |v| format!("sorted({})", v))?;
284 284
285 285 Ok(rust_res)
286 286 }
287 287
288 288 /// get head revisions
289 289 def headrevs(&self, *args, **kw) -> PyResult<PyObject> {
290 290 let rust_res = self.inner_headrevs(py)?;
291 291
292 292 let c_res = self.call_cindex(py, "headrevs", args, kw)?;
293 293 assert_py_eq(py, "headrevs", &rust_res, &c_res)?;
294 294 Ok(rust_res)
295 295 }
296 296
297 297 /// get filtered head revisions
298 298 def headrevsfiltered(&self, *args, **kw) -> PyResult<PyObject> {
299 299 let rust_res = self.inner_headrevsfiltered(py, &args.get_item(py, 0))?;
300 300 let c_res = self.call_cindex(py, "headrevsfiltered", args, kw)?;
301 301
302 302 assert_py_eq(py, "headrevsfiltered", &rust_res, &c_res)?;
303 303 Ok(rust_res)
304 304 }
305 305
306 306 /// True if the object is a snapshot
307 307 def issnapshot(&self, *args, **kw) -> PyResult<bool> {
308 308 let index = self.index(py).borrow();
309 309 let result = index
310 310 .is_snapshot(UncheckedRevision(args.get_item(py, 0).extract(py)?))
311 311 .map_err(|e| {
312 312 PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
313 313 })?;
314 314 let cresult = self.call_cindex(py, "issnapshot", args, kw)?;
315 315 assert_eq!(result, cresult.extract(py)?);
316 316 Ok(result)
317 317 }
318 318
319 319 /// Gather snapshot data in a cache dict
320 320 def findsnapshots(&self, *args, **kw) -> PyResult<PyObject> {
321 321 let index = self.index(py).borrow();
322 322 let cache: PyDict = args.get_item(py, 0).extract(py)?;
323 323 // this methods operates by setting new values in the cache,
324 324 // hence we will compare results by letting the C implementation
325 325 // operate over a deepcopy of the cache, and finally compare both
326 326 // caches.
327 327 let c_cache = PyDict::new(py);
328 328 for (k, v) in cache.items(py) {
329 329 c_cache.set_item(py, k, PySet::new(py, v)?)?;
330 330 }
331 331
332 332 let start_rev = UncheckedRevision(args.get_item(py, 1).extract(py)?);
333 333 let end_rev = UncheckedRevision(args.get_item(py, 2).extract(py)?);
334 334 let mut cache_wrapper = PySnapshotsCache{ py, dict: cache };
335 335 index.find_snapshots(
336 336 start_rev,
337 337 end_rev,
338 338 &mut cache_wrapper,
339 339 ).map_err(|_| revlog_error(py))?;
340 340
341 341 let c_args = PyTuple::new(
342 342 py,
343 343 &[
344 344 c_cache.clone_ref(py).into_object(),
345 345 args.get_item(py, 1),
346 346 args.get_item(py, 2)
347 347 ]
348 348 );
349 349 self.call_cindex(py, "findsnapshots", &c_args, kw)?;
350 350 assert_py_eq(py, "findsnapshots cache",
351 351 &cache_wrapper.into_object(),
352 352 &c_cache.into_object())?;
353 353 Ok(py.None())
354 354 }
355 355
356 356 /// determine revisions with deltas to reconstruct fulltext
357 357 def deltachain(&self, *args, **kw) -> PyResult<PyObject> {
358 358 let index = self.index(py).borrow();
359 359 let rev = args.get_item(py, 0).extract::<BaseRevision>(py)?.into();
360 360 let stop_rev =
361 361 args.get_item(py, 1).extract::<Option<BaseRevision>>(py)?;
362 362 let rev = index.check_revision(rev).ok_or_else(|| {
363 363 nodemap_error(py, NodeMapError::RevisionNotInIndex(rev))
364 364 })?;
365 365 let stop_rev = if let Some(stop_rev) = stop_rev {
366 366 let stop_rev = UncheckedRevision(stop_rev);
367 367 Some(index.check_revision(stop_rev).ok_or_else(|| {
368 368 nodemap_error(py, NodeMapError::RevisionNotInIndex(stop_rev))
369 369 })?)
370 370 } else {None};
371 371 let (chain, stopped) = index.delta_chain(rev, stop_rev).map_err(|e| {
372 372 PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
373 373 })?;
374 374
375 375 let cresult = self.call_cindex(py, "deltachain", args, kw)?;
376 376 let cchain: Vec<BaseRevision> =
377 377 cresult.get_item(py, 0)?.extract::<Vec<BaseRevision>>(py)?;
378 378 let chain: Vec<_> = chain.into_iter().map(|r| r.0).collect();
379 379 assert_eq!(chain, cchain);
380 380 assert_eq!(stopped, cresult.get_item(py, 1)?.extract(py)?);
381 381
382 382 Ok(
383 383 PyTuple::new(
384 384 py,
385 385 &[
386 386 chain.into_py_object(py).into_object(),
387 387 stopped.into_py_object(py).into_object()
388 388 ]
389 389 ).into_object()
390 390 )
391 391
392 392 }
393 393
394 394 /// slice planned chunk read to reach a density threshold
395 395 def slicechunktodensity(&self, *args, **kw) -> PyResult<PyObject> {
396 396 let rust_res = self.inner_slicechunktodensity(
397 397 py,
398 398 args.get_item(py, 0),
399 399 args.get_item(py, 1).extract(py)?,
400 400 args.get_item(py, 2).extract(py)?
401 401 )?;
402 402
403 403 let c_res = self.call_cindex(py, "slicechunktodensity", args, kw)?;
404 404 assert_py_eq(py, "slicechunktodensity", &rust_res, &c_res)?;
405 405 Ok(rust_res)
406 406 }
407 407
408 408 // index_sequence_methods and index_mapping_methods.
409 409 //
410 410 // Since we call back through the high level Python API,
411 411 // there's no point making a distinction between index_get
412 412 // and index_getitem.
413 413 // gracinet 2023: this above is no longer true for the pure Rust impl
414 414
415 415 def __len__(&self) -> PyResult<usize> {
416 416 self.len(py)
417 417 }
418 418
419 419 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
420 420 let rust_res = self.inner_getitem(py, key.clone_ref(py))?;
421 421
422 422 // this conversion seems needless, but that's actually because
423 423 // `index_getitem` does not handle conversion from PyLong,
424 424 // which expressions such as [e for e in index] internally use.
425 425 // Note that we don't seem to have a direct way to call
426 426 // PySequence_GetItem (does the job), which would possibly be better
427 427 // for performance
428 428 // gracinet 2023: the above comment can be removed when we use
429 429 // the pure Rust impl only. Note also that `key` can be a binary
430 430 // node id.
431 431 let c_key = match key.extract::<BaseRevision>(py) {
432 432 Ok(rev) => rev.to_py_object(py).into_object(),
433 433 Err(_) => key,
434 434 };
435 435 let c_res = self.cindex(py).borrow().inner().get_item(py, c_key)?;
436 436
437 437 assert_py_eq(py, "__getitem__", &rust_res, &c_res)?;
438 438 Ok(rust_res)
439 439 }
440 440
441 441 def __contains__(&self, item: PyObject) -> PyResult<bool> {
442 442 // ObjectProtocol does not seem to provide contains(), so
443 443 // this is an equivalent implementation of the index_contains()
444 444 // defined in revlog.c
445 445 let cindex = self.cindex(py).borrow();
446 446 match item.extract::<i32>(py) {
447 447 Ok(rev) => {
448 448 Ok(rev >= -1 && rev < self.len(py)? as BaseRevision)
449 449 }
450 450 Err(_) => {
451 451 let item_bytes: PyBytes = item.extract(py)?;
452 452 let rust_res = self.has_node(py, item_bytes)?;
453 453
454 454 let c_res = cindex.inner().call_method(
455 455 py,
456 456 "has_node",
457 457 PyTuple::new(py, &[item.clone_ref(py)]),
458 458 None)?
459 459 .extract(py)?;
460 460
461 461 assert_eq!(rust_res, c_res);
462 462 Ok(rust_res)
463 463 }
464 464 }
465 465 }
466 466
467 467 def nodemap_data_all(&self) -> PyResult<PyBytes> {
468 468 self.inner_nodemap_data_all(py)
469 469 }
470 470
471 471 def nodemap_data_incremental(&self) -> PyResult<PyObject> {
472 472 self.inner_nodemap_data_incremental(py)
473 473 }
474 474 def update_nodemap_data(
475 475 &self,
476 476 docket: PyObject,
477 477 nm_data: PyObject
478 478 ) -> PyResult<PyObject> {
479 479 self.inner_update_nodemap_data(py, docket, nm_data)
480 480 }
481 481
482 482 @property
483 483 def entry_size(&self) -> PyResult<PyInt> {
484 484 let rust_res: PyInt = INDEX_ENTRY_SIZE.to_py_object(py);
485 485
486 486 let c_res = self.cindex(py).borrow().inner()
487 487 .getattr(py, "entry_size")?;
488 488 assert_py_eq(py, "entry_size", rust_res.as_object(), &c_res)?;
489 489
490 490 Ok(rust_res)
491 491 }
492 492
493 493 @property
494 494 def rust_ext_compat(&self) -> PyResult<PyInt> {
495 495 // will be entirely removed once only the Rust index remains; yet useful
496 496 // to implement in Rust to detangle things when removing `self.cindex`
497 497 let rust_res: PyInt = 1.to_py_object(py);
498 498
499 499 let c_res = self.cindex(py).borrow().inner()
500 500 .getattr(py, "rust_ext_compat")?;
501 501 assert_py_eq(py, "rust_ext_compat", rust_res.as_object(), &c_res)?;
502 502
503 503 Ok(rust_res)
504 504 }
505 505
506 506 });
507 507
508 508 /// Take a (potentially) mmap'ed buffer, and return the underlying Python
509 509 /// buffer along with the Rust slice into said buffer. We need to keep the
510 510 /// Python buffer around, otherwise we'd get a dangling pointer once the buffer
511 511 /// is freed from Python's side.
512 512 ///
513 513 /// # Safety
514 514 ///
515 515 /// The caller must make sure that the buffer is kept around for at least as
516 516 /// long as the slice.
517 517 #[deny(unsafe_op_in_unsafe_fn)]
518 518 unsafe fn mmap_keeparound(
519 519 py: Python,
520 520 data: PyObject,
521 521 ) -> PyResult<(
522 522 PyBuffer,
523 Box<dyn std::ops::Deref<Target = [u8]> + Send + 'static>,
523 Box<dyn std::ops::Deref<Target = [u8]> + Send + Sync + 'static>,
524 524 )> {
525 525 let buf = PyBuffer::get(py, &data)?;
526 526 let len = buf.item_count();
527 527
528 528 // Build a slice from the mmap'ed buffer data
529 529 let cbuf = buf.buf_ptr();
530 530 let bytes = if std::mem::size_of::<u8>() == buf.item_size()
531 531 && buf.is_c_contiguous()
532 532 && u8::is_compatible_format(buf.format())
533 533 {
534 534 unsafe { std::slice::from_raw_parts(cbuf as *const u8, len) }
535 535 } else {
536 536 return Err(PyErr::new::<ValueError, _>(
537 537 py,
538 538 "Nodemap data buffer has an invalid memory representation"
539 539 .to_string(),
540 540 ));
541 541 };
542 542
543 543 Ok((buf, Box::new(bytes)))
544 544 }
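The `Sync` bound added to the returned trait object is the point of this changeset: it lets the mmap-derived byte slice be shared between threads. A minimal compile-time check, assuming nothing beyond the standard library, would look like:

// Hypothetical assertion: this only compiles if the boxed trait object is both
// Send and Sync, i.e. safe to move to and to share across threads.
fn assert_send_sync<T: Send + Sync + ?Sized>() {}

fn _check_mmap_slice_bounds() {
    assert_send_sync::<dyn std::ops::Deref<Target = [u8]> + Send + Sync>();
}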
545 545
546 546 fn py_tuple_to_revision_data_params(
547 547 py: Python,
548 548 tuple: PyTuple,
549 549 ) -> PyResult<RevisionDataParams> {
550 550 if tuple.len(py) < 8 {
551 551 // this is better than the panic promised by tup.get_item()
552 552 return Err(PyErr::new::<IndexError, _>(
553 553 py,
554 554 "tuple index out of range",
555 555 ));
556 556 }
557 557 let offset_or_flags: u64 = tuple.get_item(py, 0).extract(py)?;
558 558 let node_id = tuple
559 559 .get_item(py, 7)
560 560 .extract::<PyBytes>(py)?
561 561 .data(py)
562 562 .try_into()
563 563 .unwrap();
564 564 let flags = (offset_or_flags & 0xFFFF) as u16;
565 565 let data_offset = offset_or_flags >> 16;
566 566 Ok(RevisionDataParams {
567 567 flags,
568 568 data_offset,
569 569 data_compressed_length: tuple.get_item(py, 1).extract(py)?,
570 570 data_uncompressed_length: tuple.get_item(py, 2).extract(py)?,
571 571 data_delta_base: tuple.get_item(py, 3).extract(py)?,
572 572 link_rev: tuple.get_item(py, 4).extract(py)?,
573 573 parent_rev_1: tuple.get_item(py, 5).extract(py)?,
574 574 parent_rev_2: tuple.get_item(py, 6).extract(py)?,
575 575 node_id,
576 576 ..Default::default()
577 577 })
578 578 }
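The split performed above on `offset_or_flags` can be read in the other direction; a sketch of the inverse packing (hypothetical helper, for illustration only):

// Combine a data offset and revision flags back into the single integer carried
// by the first element of a revlog index tuple.
fn pack_offset_or_flags(data_offset: u64, flags: u16) -> u64 {
    (data_offset << 16) | flags as u64
}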
579 579 fn revision_data_params_to_py_tuple(
580 580 py: Python,
581 581 params: RevisionDataParams,
582 582 ) -> PyTuple {
583 583 PyTuple::new(
584 584 py,
585 585 &[
586 586 params.data_offset.into_py_object(py).into_object(),
587 587 params
588 588 .data_compressed_length
589 589 .into_py_object(py)
590 590 .into_object(),
591 591 params
592 592 .data_uncompressed_length
593 593 .into_py_object(py)
594 594 .into_object(),
595 595 params.data_delta_base.into_py_object(py).into_object(),
596 596 params.link_rev.into_py_object(py).into_object(),
597 597 params.parent_rev_1.into_py_object(py).into_object(),
598 598 params.parent_rev_2.into_py_object(py).into_object(),
599 599 PyBytes::new(py, &params.node_id)
600 600 .into_py_object(py)
601 601 .into_object(),
602 602 params._sidedata_offset.into_py_object(py).into_object(),
603 603 params
604 604 ._sidedata_compressed_length
605 605 .into_py_object(py)
606 606 .into_object(),
607 607 params
608 608 .data_compression_mode
609 609 .into_py_object(py)
610 610 .into_object(),
611 611 params
612 612 ._sidedata_compression_mode
613 613 .into_py_object(py)
614 614 .into_object(),
615 615 params._rank.into_py_object(py).into_object(),
616 616 ],
617 617 )
618 618 }
619 619
620 620 struct PySnapshotsCache<'p> {
621 621 py: Python<'p>,
622 622 dict: PyDict,
623 623 }
624 624
625 625 impl<'p> PySnapshotsCache<'p> {
626 626 fn into_object(self) -> PyObject {
627 627 self.dict.into_object()
628 628 }
629 629 }
630 630
631 631 impl<'p> SnapshotsCache for PySnapshotsCache<'p> {
632 632 fn insert_for(
633 633 &mut self,
634 634 rev: BaseRevision,
635 635 value: BaseRevision,
636 636 ) -> Result<(), RevlogError> {
637 637 let pyvalue = value.into_py_object(self.py).into_object();
638 638 match self.dict.get_item(self.py, rev) {
639 639 Some(obj) => obj
640 640 .extract::<PySet>(self.py)
641 641 .and_then(|set| set.add(self.py, pyvalue)),
642 642 None => PySet::new(self.py, vec![pyvalue])
643 643 .and_then(|set| self.dict.set_item(self.py, rev, set)),
644 644 }
645 645 .map_err(|_| {
646 646 RevlogError::Other(HgError::unsupported(
647 647 "Error in Python caches handling",
648 648 ))
649 649 })
650 650 }
651 651 }
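`insert_for` above manipulates a Python `dict` mapping a revision to a `set` of revisions through the cpython API; a plain-Rust equivalent of the same logic (a sketch with simplified types, not part of the bindings) would be:

use std::collections::{HashMap, HashSet};

// Insert `value` into the set stored under `rev`, creating the set on first use.
fn insert_for(cache: &mut HashMap<i32, HashSet<i32>>, rev: i32, value: i32) {
    cache.entry(rev).or_default().insert(value);
}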
652 652
653 653 impl MixedIndex {
654 654 fn new(
655 655 py: Python,
656 656 cindex: PyObject,
657 657 data: PyObject,
658 658 header: u32,
659 659 ) -> PyResult<MixedIndex> {
660 660 // Safety: we keep the buffer around inside the class as `index_mmap`
661 661 let (buf, bytes) = unsafe { mmap_keeparound(py, data)? };
662 662
663 663 Self::create_instance(
664 664 py,
665 665 RefCell::new(cindex::Index::new(py, cindex)?),
666 666 RefCell::new(
667 667 hg::index::Index::new(
668 668 bytes,
669 669 IndexHeader::parse(&header.to_be_bytes())
670 670 .expect("default header is broken")
671 671 .unwrap(),
672 672 )
673 673 .map_err(|e| {
674 674 revlog_error_with_msg(py, e.to_string().as_bytes())
675 675 })?,
676 676 ),
677 677 RefCell::new(None),
678 678 RefCell::new(None),
679 679 RefCell::new(None),
680 680 RefCell::new(Some(buf)),
681 681 )
682 682 }
683 683
684 684 fn len(&self, py: Python) -> PyResult<usize> {
685 685 let rust_index_len = self.index(py).borrow().len();
686 686 let cindex_len = self.cindex(py).borrow().inner().len(py)?;
687 687 assert_eq!(rust_index_len, cindex_len);
688 688 Ok(rust_index_len)
689 689 }
690 690
691 691 /// This is scaffolding at this point, but it could also become
692 692 /// a way to start a persistent nodemap or perform a
693 693 /// vacuum / repack operation
694 694 fn fill_nodemap(
695 695 &self,
696 696 py: Python,
697 697 nt: &mut NodeTree,
698 698 ) -> PyResult<PyObject> {
699 699 let index = self.index(py).borrow();
700 700 for r in 0..self.len(py)? {
701 701 let rev = Revision(r as BaseRevision);
702 702 // in this case node() won't ever return None
703 703 nt.insert(&*index, index.node(rev).unwrap(), rev)
704 704 .map_err(|e| nodemap_error(py, e))?
705 705 }
706 706 Ok(py.None())
707 707 }
708 708
709 709 fn get_nodetree<'a>(
710 710 &'a self,
711 711 py: Python<'a>,
712 712 ) -> PyResult<&'a RefCell<Option<NodeTree>>> {
713 713 if self.nt(py).borrow().is_none() {
714 714 let readonly = Box::<Vec<_>>::default();
715 715 let mut nt = NodeTree::load_bytes(readonly, 0);
716 716 self.fill_nodemap(py, &mut nt)?;
717 717 self.nt(py).borrow_mut().replace(nt);
718 718 }
719 719 Ok(self.nt(py))
720 720 }
721 721
722 722 /// forward a method call to the underlying C index
723 723 fn call_cindex(
724 724 &self,
725 725 py: Python,
726 726 name: &str,
727 727 args: &PyTuple,
728 728 kwargs: Option<&PyDict>,
729 729 ) -> PyResult<PyObject> {
730 730 self.cindex(py)
731 731 .borrow()
732 732 .inner()
733 733 .call_method(py, name, args, kwargs)
734 734 }
735 735
736 736 pub fn clone_cindex(&self, py: Python) -> cindex::Index {
737 737 self.cindex(py).borrow().clone_ref(py)
738 738 }
739 739
740 740 /// Returns the full nodemap bytes to be written as-is to disk
741 741 fn inner_nodemap_data_all(&self, py: Python) -> PyResult<PyBytes> {
742 742 let nodemap = self.get_nodetree(py)?.borrow_mut().take().unwrap();
743 743 let (readonly, bytes) = nodemap.into_readonly_and_added_bytes();
744 744
745 745 // If there's anything readonly, we need to build the data again from
746 746 // scratch
747 747 let bytes = if readonly.len() > 0 {
748 748 let mut nt = NodeTree::load_bytes(Box::<Vec<_>>::default(), 0);
749 749 self.fill_nodemap(py, &mut nt)?;
750 750
751 751 let (readonly, bytes) = nt.into_readonly_and_added_bytes();
752 752 assert_eq!(readonly.len(), 0);
753 753
754 754 bytes
755 755 } else {
756 756 bytes
757 757 };
758 758
759 759 let bytes = PyBytes::new(py, &bytes);
760 760 Ok(bytes)
761 761 }
762 762
763 763 /// Returns the last saved docket along with the size of any changed data
764 764 /// (in number of blocks), and said data as bytes.
765 765 fn inner_nodemap_data_incremental(
766 766 &self,
767 767 py: Python,
768 768 ) -> PyResult<PyObject> {
769 769 let docket = self.docket(py).borrow();
770 770 let docket = match docket.as_ref() {
771 771 Some(d) => d,
772 772 None => return Ok(py.None()),
773 773 };
774 774
775 775 let node_tree = self.get_nodetree(py)?.borrow_mut().take().unwrap();
776 776 let masked_blocks = node_tree.masked_readonly_blocks();
777 777 let (_, data) = node_tree.into_readonly_and_added_bytes();
778 778 let changed = masked_blocks * std::mem::size_of::<Block>();
779 779
780 780 Ok((docket, changed, PyBytes::new(py, &data))
781 781 .to_py_object(py)
782 782 .into_object())
783 783 }
784 784
785 785 /// Update the nodemap from the new (mmaped) data.
786 786 /// The docket is kept as a reference for later incremental calls.
787 787 fn inner_update_nodemap_data(
788 788 &self,
789 789 py: Python,
790 790 docket: PyObject,
791 791 nm_data: PyObject,
792 792 ) -> PyResult<PyObject> {
793 793 // Safety: we keep the buffer around inside the class as `nodemap_mmap`
794 794 let (buf, bytes) = unsafe { mmap_keeparound(py, nm_data)? };
795 795 let len = buf.item_count();
796 796 self.nodemap_mmap(py).borrow_mut().replace(buf);
797 797
798 798 let mut nt = NodeTree::load_bytes(bytes, len);
799 799
800 800 let data_tip = docket
801 801 .getattr(py, "tip_rev")?
802 802 .extract::<BaseRevision>(py)?
803 803 .into();
804 804 self.docket(py).borrow_mut().replace(docket.clone_ref(py));
805 805 let idx = self.index(py).borrow();
806 806 let data_tip = idx.check_revision(data_tip).ok_or_else(|| {
807 807 nodemap_error(py, NodeMapError::RevisionNotInIndex(data_tip))
808 808 })?;
809 809 let current_tip = idx.len();
810 810
811 811 for r in (data_tip.0 + 1)..current_tip as BaseRevision {
812 812 let rev = Revision(r);
813 813 // in this case node() won't ever return None
814 814 nt.insert(&*idx, idx.node(rev).unwrap(), rev)
815 815 .map_err(|e| nodemap_error(py, e))?
816 816 }
817 817
818 818 *self.nt(py).borrow_mut() = Some(nt);
819 819
820 820 Ok(py.None())
821 821 }
822 822
823 823 fn inner_getitem(&self, py: Python, key: PyObject) -> PyResult<PyObject> {
824 824 let idx = self.index(py).borrow();
825 825 Ok(match key.extract::<BaseRevision>(py) {
826 826 Ok(key_as_int) => {
827 827 let entry_params = if key_as_int == NULL_REVISION.0 {
828 828 RevisionDataParams::default()
829 829 } else {
830 830 let rev = UncheckedRevision(key_as_int);
831 831 match idx.entry_as_params(rev) {
832 832 Some(e) => e,
833 833 None => {
834 834 return Err(PyErr::new::<IndexError, _>(
835 835 py,
836 836 "revlog index out of range",
837 837 ));
838 838 }
839 839 }
840 840 };
841 841 revision_data_params_to_py_tuple(py, entry_params)
842 842 .into_object()
843 843 }
844 844 _ => self.get_rev(py, key.extract::<PyBytes>(py)?)?.map_or_else(
845 845 || py.None(),
846 846 |py_rev| py_rev.into_py_object(py).into_object(),
847 847 ),
848 848 })
849 849 }
850 850
851 851 fn inner_headrevs(&self, py: Python) -> PyResult<PyObject> {
852 852 let index = &mut *self.index(py).borrow_mut();
853 853 let as_vec: Vec<PyObject> = index
854 854 .head_revs()
855 855 .map_err(|e| graph_error(py, e))?
856 856 .iter()
857 857 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
858 858 .collect();
859 859 Ok(PyList::new(py, &as_vec).into_object())
860 860 }
861 861
862 862 fn inner_headrevsfiltered(
863 863 &self,
864 864 py: Python,
865 865 filtered_revs: &PyObject,
866 866 ) -> PyResult<PyObject> {
867 867 let index = &mut *self.index(py).borrow_mut();
868 868 let filtered_revs = rev_pyiter_collect(py, filtered_revs, index)?;
869 869
870 870 let as_vec: Vec<PyObject> = index
871 871 .head_revs_filtered(&filtered_revs)
872 872 .map_err(|e| graph_error(py, e))?
873 873 .iter()
874 874 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
875 875 .collect();
876 876 Ok(PyList::new(py, &as_vec).into_object())
877 877 }
878 878
879 879 fn inner_ancestors(
880 880 &self,
881 881 py: Python,
882 882 py_revs: &PyTuple,
883 883 ) -> PyResult<PyObject> {
884 884 let index = &mut *self.index(py).borrow_mut();
885 885 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
886 886 let as_vec: Vec<_> = index
887 887 .ancestors(&revs)
888 888 .map_err(|e| graph_error(py, e))?
889 889 .iter()
890 890 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
891 891 .collect();
892 892 Ok(PyList::new(py, &as_vec).into_object())
893 893 }
894 894
895 895 fn inner_commonancestorsheads(
896 896 &self,
897 897 py: Python,
898 898 py_revs: &PyTuple,
899 899 ) -> PyResult<PyObject> {
900 900 let index = &mut *self.index(py).borrow_mut();
901 901 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
902 902 let as_vec: Vec<_> = index
903 903 .common_ancestor_heads(&revs)
904 904 .map_err(|e| graph_error(py, e))?
905 905 .iter()
906 906 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
907 907 .collect();
908 908 Ok(PyList::new(py, &as_vec).into_object())
909 909 }
910 910
911 911 fn inner_computephasesmapsets(
912 912 &self,
913 913 py: Python,
914 914 py_roots: PyDict,
915 915 ) -> PyResult<PyObject> {
916 916 let index = &*self.index(py).borrow();
917 917 let opt = self.get_nodetree(py)?.borrow();
918 918 let nt = opt.as_ref().unwrap();
919 919 let roots: Result<HashMap<Phase, Vec<Revision>>, PyErr> = py_roots
920 920 .items_list(py)
921 921 .iter(py)
922 922 .map(|r| {
923 923 let phase = r.get_item(py, 0)?;
924 924 let nodes = r.get_item(py, 1)?;
925 925 // Transform the nodes from Python to revs here since we
926 926 // have access to the nodemap
927 927 let revs: Result<_, _> = nodes
928 928 .iter(py)?
929 929 .map(|node| match node?.extract::<PyBytes>(py) {
930 930 Ok(py_bytes) => {
931 931 let node = node_from_py_bytes(py, &py_bytes)?;
932 932 nt.find_bin(index, node.into())
933 933 .map_err(|e| nodemap_error(py, e))?
934 934 .ok_or_else(|| revlog_error(py))
935 935 }
936 936 Err(e) => Err(e),
937 937 })
938 938 .collect();
939 939 let phase = Phase::try_from(phase.extract::<usize>(py)?)
940 940 .map_err(|_| revlog_error(py));
941 941 Ok((phase?, revs?))
942 942 })
943 943 .collect();
944 944 let (len, phase_maps) = index
945 945 .compute_phases_map_sets(roots?)
946 946 .map_err(|e| graph_error(py, e))?;
947 947
948 948 // Ugly hack, but temporary
949 949 const IDX_TO_PHASE_NUM: [usize; 4] = [1, 2, 32, 96];
950 950 let py_phase_maps = PyDict::new(py);
951 951 for (idx, roots) in phase_maps.iter().enumerate() {
952 952 let phase_num = IDX_TO_PHASE_NUM[idx].into_py_object(py);
953 953 // OPTIM too bad we have to collect here. At least, we could
954 954 // reuse the same Vec and allocate it with capacity at
955 955 // max(len(phase_maps))
956 956 let roots_vec: Vec<PyInt> = roots
957 957 .iter()
958 958 .map(|r| PyRevision::from(*r).into_py_object(py))
959 959 .collect();
960 960 py_phase_maps.set_item(
961 961 py,
962 962 phase_num,
963 963 PySet::new(py, roots_vec)?,
964 964 )?;
965 965 }
966 966 Ok(PyTuple::new(
967 967 py,
968 968 &[
969 969 len.into_py_object(py).into_object(),
970 970 py_phase_maps.into_object(),
971 971 ],
972 972 )
973 973 .into_object())
974 974 }
975 975
976 976 fn inner_slicechunktodensity(
977 977 &self,
978 978 py: Python,
979 979 revs: PyObject,
980 980 target_density: f64,
981 981 min_gap_size: usize,
982 982 ) -> PyResult<PyObject> {
983 983 let index = &mut *self.index(py).borrow_mut();
984 984 let revs: Vec<_> = rev_pyiter_collect(py, &revs, index)?;
985 985 let as_nested_vec =
986 986 index.slice_chunk_to_density(&revs, target_density, min_gap_size);
987 987 let mut res = Vec::with_capacity(as_nested_vec.len());
988 988 let mut py_chunk = Vec::new();
989 989 for chunk in as_nested_vec {
990 990 py_chunk.clear();
991 991 py_chunk.reserve_exact(chunk.len());
992 992 for rev in chunk {
993 993 py_chunk.push(
994 994 PyRevision::from(rev).into_py_object(py).into_object(),
995 995 );
996 996 }
997 997 res.push(PyList::new(py, &py_chunk).into_object());
998 998 }
999 999 // This is just to do the same as C, not sure why it does this
1000 1000 if res.len() == 1 {
1001 1001 Ok(PyTuple::new(py, &res).into_object())
1002 1002 } else {
1003 1003 Ok(PyList::new(py, &res).into_object())
1004 1004 }
1005 1005 }
1006 1006
1007 1007 fn inner_reachableroots2(
1008 1008 &self,
1009 1009 py: Python,
1010 1010 min_root: UncheckedRevision,
1011 1011 heads: PyObject,
1012 1012 roots: PyObject,
1013 1013 include_path: bool,
1014 1014 ) -> PyResult<PyObject> {
1015 1015 let index = &*self.index(py).borrow();
1016 1016 let heads = rev_pyiter_collect_or_else(py, &heads, index, |_rev| {
1017 1017 PyErr::new::<IndexError, _>(py, "head out of range")
1018 1018 })?;
1019 1019 let roots: Result<_, _> = roots
1020 1020 .iter(py)?
1021 1021 .map(|r| {
1022 1022 r.and_then(|o| match o.extract::<PyRevision>(py) {
1023 1023 Ok(r) => Ok(UncheckedRevision(r.0)),
1024 1024 Err(e) => Err(e),
1025 1025 })
1026 1026 })
1027 1027 .collect();
1028 1028 let as_set = index
1029 1029 .reachable_roots(min_root, heads, roots?, include_path)
1030 1030 .map_err(|e| graph_error(py, e))?;
1031 1031 let as_vec: Vec<PyObject> = as_set
1032 1032 .iter()
1033 1033 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
1034 1034 .collect();
1035 1035 Ok(PyList::new(py, &as_vec).into_object())
1036 1036 }
1037 1037 }
1038 1038
1039 1039 fn revlog_error(py: Python) -> PyErr {
1040 1040 match py
1041 1041 .import("mercurial.error")
1042 1042 .and_then(|m| m.get(py, "RevlogError"))
1043 1043 {
1044 1044 Err(e) => e,
1045 1045 Ok(cls) => PyErr::from_instance(
1046 1046 py,
1047 1047 cls.call(py, (py.None(),), None).ok().into_py_object(py),
1048 1048 ),
1049 1049 }
1050 1050 }
1051 1051
1052 1052 fn revlog_error_with_msg(py: Python, msg: &[u8]) -> PyErr {
1053 1053 match py
1054 1054 .import("mercurial.error")
1055 1055 .and_then(|m| m.get(py, "RevlogError"))
1056 1056 {
1057 1057 Err(e) => e,
1058 1058 Ok(cls) => PyErr::from_instance(
1059 1059 py,
1060 1060 cls.call(py, (PyBytes::new(py, msg),), None)
1061 1061 .ok()
1062 1062 .into_py_object(py),
1063 1063 ),
1064 1064 }
1065 1065 }
1066 1066
1067 1067 fn graph_error(py: Python, _err: hg::GraphError) -> PyErr {
1068 1068 // ParentOutOfRange is currently the only alternative
1069 1069 // in `hg::GraphError`. The C index always raises this simple ValueError.
1070 1070 PyErr::new::<ValueError, _>(py, "parent out of range")
1071 1071 }
1072 1072
1073 1073 fn nodemap_rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1074 1074 PyErr::new::<ValueError, _>(
1075 1075 py,
1076 1076 format!(
1077 1077 "Inconsistency: Revision {} found in nodemap \
1078 1078 is not in revlog index",
1079 1079 rev
1080 1080 ),
1081 1081 )
1082 1082 }
1083 1083
1084 1084 fn rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1085 1085 PyErr::new::<ValueError, _>(
1086 1086 py,
1087 1087 format!("revlog index out of range: {}", rev),
1088 1088 )
1089 1089 }
1090 1090
1091 1091 /// Standard treatment of NodeMapError
1092 1092 fn nodemap_error(py: Python, err: NodeMapError) -> PyErr {
1093 1093 match err {
1094 1094 NodeMapError::MultipleResults => revlog_error(py),
1095 1095 NodeMapError::RevisionNotInIndex(r) => nodemap_rev_not_in_index(py, r),
1096 1096 }
1097 1097 }
1098 1098
1099 1099 /// assert two Python objects to be equal from a Python point of view
1100 1100 ///
1101 1101 /// `method` is a label for the assertion error message, intended to be the
1102 1102 /// name of the caller.
1103 1103 /// `normalizer` is a function that takes a Python variable name and returns
1104 1104 /// an expression that the comparison will actually use.
1105 1105 /// For example: `|v| format!("sorted({})", v)`
1106 1106 fn assert_py_eq_normalized(
1107 1107 py: Python,
1108 1108 method: &str,
1109 1109 rust: &PyObject,
1110 1110 c: &PyObject,
1111 1111 normalizer: impl FnOnce(&str) -> String + Copy,
1112 1112 ) -> PyResult<()> {
1113 1113 let locals = PyDict::new(py);
1114 1114 locals.set_item(py, "rust".into_py_object(py).into_object(), rust)?;
1115 1115 locals.set_item(py, "c".into_py_object(py).into_object(), c)?;
1116 1116 // let lhs = format!(normalizer_fmt, "rust");
1117 1117 // let rhs = format!(normalizer_fmt, "c");
1118 1118 let is_eq: PyBool = py
1119 1119 .eval(
1120 1120 &format!("{} == {}", &normalizer("rust"), &normalizer("c")),
1121 1121 None,
1122 1122 Some(&locals),
1123 1123 )?
1124 1124 .extract(py)?;
1125 1125 assert!(
1126 1126 is_eq.is_true(),
1127 1127 "{} results differ. Rust: {:?} C: {:?} (before any normalization)",
1128 1128 method,
1129 1129 rust,
1130 1130 c
1131 1131 );
1132 1132 Ok(())
1133 1133 }
1134 1134
1135 1135 fn assert_py_eq(
1136 1136 py: Python,
1137 1137 method: &str,
1138 1138 rust: &PyObject,
1139 1139 c: &PyObject,
1140 1140 ) -> PyResult<()> {
1141 1141 assert_py_eq_normalized(py, method, rust, c, |v| v.to_owned())
1142 1142 }
1143 1143
1144 1144 /// Create the module, with __package__ given from parent
1145 1145 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
1146 1146 let dotted_name = &format!("{}.revlog", package);
1147 1147 let m = PyModule::new(py, dotted_name)?;
1148 1148 m.add(py, "__package__", package)?;
1149 1149 m.add(py, "__doc__", "RevLog - Rust implementations")?;
1150 1150
1151 1151 m.add_class::<MixedIndex>(py)?;
1152 1152
1153 1153 let sys = PyModule::import(py, "sys")?;
1154 1154 let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
1155 1155 sys_modules.set_item(py, dotted_name, &m)?;
1156 1156
1157 1157 Ok(m)
1158 1158 }