##// END OF EJS Templates
rust-index: add Sync bound to all relevant mmap-derived values...
Raphaël Gomès -
r52126:ca81cd96 default
parent child Browse files
Show More
@@ -1,1975 +1,1975 b''
1 use std::collections::hash_map::RandomState;
1 use std::collections::hash_map::RandomState;
2 use std::collections::{HashMap, HashSet};
2 use std::collections::{HashMap, HashSet};
3 use std::fmt::Debug;
3 use std::fmt::Debug;
4 use std::ops::Deref;
4 use std::ops::Deref;
5 use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
5 use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
6
6
7 use byteorder::{BigEndian, ByteOrder};
7 use byteorder::{BigEndian, ByteOrder};
8 use bytes_cast::{unaligned, BytesCast};
8 use bytes_cast::{unaligned, BytesCast};
9
9
10 use super::REVIDX_KNOWN_FLAGS;
10 use super::REVIDX_KNOWN_FLAGS;
11 use crate::errors::HgError;
11 use crate::errors::HgError;
12 use crate::node::{NODE_BYTES_LENGTH, NULL_NODE, STORED_NODE_ID_BYTES};
12 use crate::node::{NODE_BYTES_LENGTH, NULL_NODE, STORED_NODE_ID_BYTES};
13 use crate::revlog::node::Node;
13 use crate::revlog::node::Node;
14 use crate::revlog::{Revision, NULL_REVISION};
14 use crate::revlog::{Revision, NULL_REVISION};
15 use crate::{
15 use crate::{
16 dagops, BaseRevision, FastHashMap, Graph, GraphError, RevlogError,
16 dagops, BaseRevision, FastHashMap, Graph, GraphError, RevlogError,
17 RevlogIndex, UncheckedRevision,
17 RevlogIndex, UncheckedRevision,
18 };
18 };
19
19
/// Size in bytes of one v1 revlog index entry.
pub const INDEX_ENTRY_SIZE: usize = 64;
/// Compression-mode marker meaning the data is stored inline.
pub const COMPRESSION_MODE_INLINE: u8 = 2;
22
22
23 #[derive(Debug)]
23 #[derive(Debug)]
24 pub struct IndexHeader {
24 pub struct IndexHeader {
25 pub(super) header_bytes: [u8; 4],
25 pub(super) header_bytes: [u8; 4],
26 }
26 }
27
27
/// The 16 flag bits carried by a revlog index header.
#[derive(Copy, Clone)]
pub struct IndexHeaderFlags {
    flags: u16,
}
32
32
33 /// Corresponds to the high bits of `_format_flags` in python
33 /// Corresponds to the high bits of `_format_flags` in python
34 impl IndexHeaderFlags {
34 impl IndexHeaderFlags {
35 /// Corresponds to FLAG_INLINE_DATA in python
35 /// Corresponds to FLAG_INLINE_DATA in python
36 pub fn is_inline(self) -> bool {
36 pub fn is_inline(self) -> bool {
37 self.flags & 1 != 0
37 self.flags & 1 != 0
38 }
38 }
39 /// Corresponds to FLAG_GENERALDELTA in python
39 /// Corresponds to FLAG_GENERALDELTA in python
40 pub fn uses_generaldelta(self) -> bool {
40 pub fn uses_generaldelta(self) -> bool {
41 self.flags & 2 != 0
41 self.flags & 2 != 0
42 }
42 }
43 }
43 }
44
44
45 /// Corresponds to the INDEX_HEADER structure,
45 /// Corresponds to the INDEX_HEADER structure,
46 /// which is parsed as a `header` variable in `_loadindex` in `revlog.py`
46 /// which is parsed as a `header` variable in `_loadindex` in `revlog.py`
47 impl IndexHeader {
47 impl IndexHeader {
48 fn format_flags(&self) -> IndexHeaderFlags {
48 fn format_flags(&self) -> IndexHeaderFlags {
49 // No "unknown flags" check here, unlike in python. Maybe there should
49 // No "unknown flags" check here, unlike in python. Maybe there should
50 // be.
50 // be.
51 IndexHeaderFlags {
51 IndexHeaderFlags {
52 flags: BigEndian::read_u16(&self.header_bytes[0..2]),
52 flags: BigEndian::read_u16(&self.header_bytes[0..2]),
53 }
53 }
54 }
54 }
55
55
56 /// The only revlog version currently supported by rhg.
56 /// The only revlog version currently supported by rhg.
57 const REVLOGV1: u16 = 1;
57 const REVLOGV1: u16 = 1;
58
58
59 /// Corresponds to `_format_version` in Python.
59 /// Corresponds to `_format_version` in Python.
60 fn format_version(&self) -> u16 {
60 fn format_version(&self) -> u16 {
61 BigEndian::read_u16(&self.header_bytes[2..4])
61 BigEndian::read_u16(&self.header_bytes[2..4])
62 }
62 }
63
63
64 pub fn parse(index_bytes: &[u8]) -> Result<Option<IndexHeader>, HgError> {
64 pub fn parse(index_bytes: &[u8]) -> Result<Option<IndexHeader>, HgError> {
65 if index_bytes.is_empty() {
65 if index_bytes.is_empty() {
66 return Ok(None);
66 return Ok(None);
67 }
67 }
68 if index_bytes.len() < 4 {
68 if index_bytes.len() < 4 {
69 return Err(HgError::corrupted(
69 return Err(HgError::corrupted(
70 "corrupted revlog: can't read the index format header",
70 "corrupted revlog: can't read the index format header",
71 ));
71 ));
72 }
72 }
73 Ok(Some(IndexHeader {
73 Ok(Some(IndexHeader {
74 header_bytes: {
74 header_bytes: {
75 let bytes: [u8; 4] =
75 let bytes: [u8; 4] =
76 index_bytes[0..4].try_into().expect("impossible");
76 index_bytes[0..4].try_into().expect("impossible");
77 bytes
77 bytes
78 },
78 },
79 }))
79 }))
80 }
80 }
81 }
81 }
82
82
/// Abstracts the access to the index bytes since they can be spread between
/// the immutable (bytes) part and the mutable (added) part if any appends
/// happened. This makes it transparent for the callers.
struct IndexData {
    /// Immutable bytes, most likely taken from disk
    bytes: Box<dyn Deref<Target = [u8]> + Send + Sync>,
    /// Used when stripping index contents, keeps track of the start of the
    /// first stripped revision, which is used to give a slice of the
    /// `bytes` field.
    truncation: Option<usize>,
    /// Bytes that were added after reading the index
    added: Vec<u8>,
}
96
96
97 impl IndexData {
97 impl IndexData {
98 pub fn new(bytes: Box<dyn Deref<Target = [u8]> + Send>) -> Self {
98 pub fn new(bytes: Box<dyn Deref<Target = [u8]> + Send + Sync>) -> Self {
99 Self {
99 Self {
100 bytes,
100 bytes,
101 truncation: None,
101 truncation: None,
102 added: vec![],
102 added: vec![],
103 }
103 }
104 }
104 }
105
105
106 pub fn len(&self) -> usize {
106 pub fn len(&self) -> usize {
107 match self.truncation {
107 match self.truncation {
108 Some(truncation) => truncation + self.added.len(),
108 Some(truncation) => truncation + self.added.len(),
109 None => self.bytes.len() + self.added.len(),
109 None => self.bytes.len() + self.added.len(),
110 }
110 }
111 }
111 }
112
112
113 fn remove(
113 fn remove(
114 &mut self,
114 &mut self,
115 rev: Revision,
115 rev: Revision,
116 offsets: Option<&[usize]>,
116 offsets: Option<&[usize]>,
117 ) -> Result<(), RevlogError> {
117 ) -> Result<(), RevlogError> {
118 let rev = rev.0 as usize;
118 let rev = rev.0 as usize;
119 let truncation = if let Some(offsets) = offsets {
119 let truncation = if let Some(offsets) = offsets {
120 offsets[rev]
120 offsets[rev]
121 } else {
121 } else {
122 rev * INDEX_ENTRY_SIZE
122 rev * INDEX_ENTRY_SIZE
123 };
123 };
124 if truncation < self.bytes.len() {
124 if truncation < self.bytes.len() {
125 self.truncation = Some(truncation);
125 self.truncation = Some(truncation);
126 self.added.clear();
126 self.added.clear();
127 } else {
127 } else {
128 self.added.truncate(truncation - self.bytes.len());
128 self.added.truncate(truncation - self.bytes.len());
129 }
129 }
130 Ok(())
130 Ok(())
131 }
131 }
132
132
133 fn is_new(&self) -> bool {
133 fn is_new(&self) -> bool {
134 self.bytes.is_empty()
134 self.bytes.is_empty()
135 }
135 }
136 }
136 }
137
137
138 impl std::ops::Index<std::ops::Range<usize>> for IndexData {
138 impl std::ops::Index<std::ops::Range<usize>> for IndexData {
139 type Output = [u8];
139 type Output = [u8];
140
140
141 fn index(&self, index: std::ops::Range<usize>) -> &Self::Output {
141 fn index(&self, index: std::ops::Range<usize>) -> &Self::Output {
142 let start = index.start;
142 let start = index.start;
143 let end = index.end;
143 let end = index.end;
144 let immutable_len = match self.truncation {
144 let immutable_len = match self.truncation {
145 Some(truncation) => truncation,
145 Some(truncation) => truncation,
146 None => self.bytes.len(),
146 None => self.bytes.len(),
147 };
147 };
148 if start < immutable_len {
148 if start < immutable_len {
149 if end > immutable_len {
149 if end > immutable_len {
150 panic!("index data cannot span existing and added ranges");
150 panic!("index data cannot span existing and added ranges");
151 }
151 }
152 &self.bytes[index]
152 &self.bytes[index]
153 } else {
153 } else {
154 &self.added[start - immutable_len..end - immutable_len]
154 &self.added[start - immutable_len..end - immutable_len]
155 }
155 }
156 }
156 }
157 }
157 }
158
158
159 #[derive(Debug, PartialEq, Eq)]
159 #[derive(Debug, PartialEq, Eq)]
160 pub struct RevisionDataParams {
160 pub struct RevisionDataParams {
161 pub flags: u16,
161 pub flags: u16,
162 pub data_offset: u64,
162 pub data_offset: u64,
163 pub data_compressed_length: i32,
163 pub data_compressed_length: i32,
164 pub data_uncompressed_length: i32,
164 pub data_uncompressed_length: i32,
165 pub data_delta_base: i32,
165 pub data_delta_base: i32,
166 pub link_rev: i32,
166 pub link_rev: i32,
167 pub parent_rev_1: i32,
167 pub parent_rev_1: i32,
168 pub parent_rev_2: i32,
168 pub parent_rev_2: i32,
169 pub node_id: [u8; NODE_BYTES_LENGTH],
169 pub node_id: [u8; NODE_BYTES_LENGTH],
170 pub _sidedata_offset: u64,
170 pub _sidedata_offset: u64,
171 pub _sidedata_compressed_length: i32,
171 pub _sidedata_compressed_length: i32,
172 pub data_compression_mode: u8,
172 pub data_compression_mode: u8,
173 pub _sidedata_compression_mode: u8,
173 pub _sidedata_compression_mode: u8,
174 pub _rank: i32,
174 pub _rank: i32,
175 }
175 }
176
176
177 impl Default for RevisionDataParams {
177 impl Default for RevisionDataParams {
178 fn default() -> Self {
178 fn default() -> Self {
179 Self {
179 Self {
180 flags: 0,
180 flags: 0,
181 data_offset: 0,
181 data_offset: 0,
182 data_compressed_length: 0,
182 data_compressed_length: 0,
183 data_uncompressed_length: 0,
183 data_uncompressed_length: 0,
184 data_delta_base: -1,
184 data_delta_base: -1,
185 link_rev: -1,
185 link_rev: -1,
186 parent_rev_1: -1,
186 parent_rev_1: -1,
187 parent_rev_2: -1,
187 parent_rev_2: -1,
188 node_id: [0; NODE_BYTES_LENGTH],
188 node_id: [0; NODE_BYTES_LENGTH],
189 _sidedata_offset: 0,
189 _sidedata_offset: 0,
190 _sidedata_compressed_length: 0,
190 _sidedata_compressed_length: 0,
191 data_compression_mode: COMPRESSION_MODE_INLINE,
191 data_compression_mode: COMPRESSION_MODE_INLINE,
192 _sidedata_compression_mode: COMPRESSION_MODE_INLINE,
192 _sidedata_compression_mode: COMPRESSION_MODE_INLINE,
193 _rank: -1,
193 _rank: -1,
194 }
194 }
195 }
195 }
196 }
196 }
197
197
198 #[derive(BytesCast)]
198 #[derive(BytesCast)]
199 #[repr(C)]
199 #[repr(C)]
200 pub struct RevisionDataV1 {
200 pub struct RevisionDataV1 {
201 data_offset_or_flags: unaligned::U64Be,
201 data_offset_or_flags: unaligned::U64Be,
202 data_compressed_length: unaligned::I32Be,
202 data_compressed_length: unaligned::I32Be,
203 data_uncompressed_length: unaligned::I32Be,
203 data_uncompressed_length: unaligned::I32Be,
204 data_delta_base: unaligned::I32Be,
204 data_delta_base: unaligned::I32Be,
205 link_rev: unaligned::I32Be,
205 link_rev: unaligned::I32Be,
206 parent_rev_1: unaligned::I32Be,
206 parent_rev_1: unaligned::I32Be,
207 parent_rev_2: unaligned::I32Be,
207 parent_rev_2: unaligned::I32Be,
208 node_id: [u8; STORED_NODE_ID_BYTES],
208 node_id: [u8; STORED_NODE_ID_BYTES],
209 }
209 }
210
210
211 fn _static_assert_size_of_revision_data_v1() {
211 fn _static_assert_size_of_revision_data_v1() {
212 let _ = std::mem::transmute::<RevisionDataV1, [u8; 64]>;
212 let _ = std::mem::transmute::<RevisionDataV1, [u8; 64]>;
213 }
213 }
214
214
215 impl RevisionDataParams {
215 impl RevisionDataParams {
216 pub fn validate(&self) -> Result<(), RevlogError> {
216 pub fn validate(&self) -> Result<(), RevlogError> {
217 if self.flags & !REVIDX_KNOWN_FLAGS != 0 {
217 if self.flags & !REVIDX_KNOWN_FLAGS != 0 {
218 return Err(RevlogError::corrupted(format!(
218 return Err(RevlogError::corrupted(format!(
219 "unknown revlog index flags: {}",
219 "unknown revlog index flags: {}",
220 self.flags
220 self.flags
221 )));
221 )));
222 }
222 }
223 if self.data_compression_mode != COMPRESSION_MODE_INLINE {
223 if self.data_compression_mode != COMPRESSION_MODE_INLINE {
224 return Err(RevlogError::corrupted(format!(
224 return Err(RevlogError::corrupted(format!(
225 "invalid data compression mode: {}",
225 "invalid data compression mode: {}",
226 self.data_compression_mode
226 self.data_compression_mode
227 )));
227 )));
228 }
228 }
229 // FIXME isn't this only for v2 or changelog v2?
229 // FIXME isn't this only for v2 or changelog v2?
230 if self._sidedata_compression_mode != COMPRESSION_MODE_INLINE {
230 if self._sidedata_compression_mode != COMPRESSION_MODE_INLINE {
231 return Err(RevlogError::corrupted(format!(
231 return Err(RevlogError::corrupted(format!(
232 "invalid sidedata compression mode: {}",
232 "invalid sidedata compression mode: {}",
233 self._sidedata_compression_mode
233 self._sidedata_compression_mode
234 )));
234 )));
235 }
235 }
236 Ok(())
236 Ok(())
237 }
237 }
238
238
239 pub fn into_v1(self) -> RevisionDataV1 {
239 pub fn into_v1(self) -> RevisionDataV1 {
240 let data_offset_or_flags = self.data_offset << 16 | self.flags as u64;
240 let data_offset_or_flags = self.data_offset << 16 | self.flags as u64;
241 let mut node_id = [0; STORED_NODE_ID_BYTES];
241 let mut node_id = [0; STORED_NODE_ID_BYTES];
242 node_id[..NODE_BYTES_LENGTH].copy_from_slice(&self.node_id);
242 node_id[..NODE_BYTES_LENGTH].copy_from_slice(&self.node_id);
243 RevisionDataV1 {
243 RevisionDataV1 {
244 data_offset_or_flags: data_offset_or_flags.into(),
244 data_offset_or_flags: data_offset_or_flags.into(),
245 data_compressed_length: self.data_compressed_length.into(),
245 data_compressed_length: self.data_compressed_length.into(),
246 data_uncompressed_length: self.data_uncompressed_length.into(),
246 data_uncompressed_length: self.data_uncompressed_length.into(),
247 data_delta_base: self.data_delta_base.into(),
247 data_delta_base: self.data_delta_base.into(),
248 link_rev: self.link_rev.into(),
248 link_rev: self.link_rev.into(),
249 parent_rev_1: self.parent_rev_1.into(),
249 parent_rev_1: self.parent_rev_1.into(),
250 parent_rev_2: self.parent_rev_2.into(),
250 parent_rev_2: self.parent_rev_2.into(),
251 node_id,
251 node_id,
252 }
252 }
253 }
253 }
254 }
254 }
255
255
256 /// A Revlog index
256 /// A Revlog index
257 pub struct Index {
257 pub struct Index {
258 bytes: IndexData,
258 bytes: IndexData,
259 /// Offsets of starts of index blocks.
259 /// Offsets of starts of index blocks.
260 /// Only needed when the index is interleaved with data.
260 /// Only needed when the index is interleaved with data.
261 offsets: RwLock<Option<Vec<usize>>>,
261 offsets: RwLock<Option<Vec<usize>>>,
262 uses_generaldelta: bool,
262 uses_generaldelta: bool,
263 is_inline: bool,
263 is_inline: bool,
264 /// Cache of the head revisions in this index, kept in sync. Should
264 /// Cache of the head revisions in this index, kept in sync. Should
265 /// be accessed via the [`Self::head_revs`] method.
265 /// be accessed via the [`Self::head_revs`] method.
266 head_revs: Vec<Revision>,
266 head_revs: Vec<Revision>,
267 /// Cache of the last filtered revisions in this index, used to make sure
267 /// Cache of the last filtered revisions in this index, used to make sure
268 /// we haven't changed filters when returning the cached `head_revs`.
268 /// we haven't changed filters when returning the cached `head_revs`.
269 filtered_revs: HashSet<Revision>,
269 filtered_revs: HashSet<Revision>,
270 }
270 }
271
271
272 impl Debug for Index {
272 impl Debug for Index {
273 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
273 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
274 f.debug_struct("Index")
274 f.debug_struct("Index")
275 .field("offsets", &self.offsets)
275 .field("offsets", &self.offsets)
276 .field("uses_generaldelta", &self.uses_generaldelta)
276 .field("uses_generaldelta", &self.uses_generaldelta)
277 .finish()
277 .finish()
278 }
278 }
279 }
279 }
280
280
281 impl Graph for Index {
281 impl Graph for Index {
282 fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
282 fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
283 let err = || GraphError::ParentOutOfRange(rev);
283 let err = || GraphError::ParentOutOfRange(rev);
284 match self.get_entry(rev) {
284 match self.get_entry(rev) {
285 Some(entry) => {
285 Some(entry) => {
286 // The C implementation checks that the parents are valid
286 // The C implementation checks that the parents are valid
287 // before returning
287 // before returning
288 Ok([
288 Ok([
289 self.check_revision(entry.p1()).ok_or_else(err)?,
289 self.check_revision(entry.p1()).ok_or_else(err)?,
290 self.check_revision(entry.p2()).ok_or_else(err)?,
290 self.check_revision(entry.p2()).ok_or_else(err)?,
291 ])
291 ])
292 }
292 }
293 None => Ok([NULL_REVISION, NULL_REVISION]),
293 None => Ok([NULL_REVISION, NULL_REVISION]),
294 }
294 }
295 }
295 }
296 }
296 }
297
297
298 /// A cache suitable for find_snapshots
298 /// A cache suitable for find_snapshots
299 ///
299 ///
300 /// Logically equivalent to a mapping whose keys are [`BaseRevision`] and
300 /// Logically equivalent to a mapping whose keys are [`BaseRevision`] and
301 /// values sets of [`BaseRevision`]
301 /// values sets of [`BaseRevision`]
302 ///
302 ///
303 /// TODO the dubious part is insisting that errors must be RevlogError
303 /// TODO the dubious part is insisting that errors must be RevlogError
304 /// we would probably need to sprinkle some magic here, such as an associated
304 /// we would probably need to sprinkle some magic here, such as an associated
305 /// type that would be Into<RevlogError> but even that would not be
305 /// type that would be Into<RevlogError> but even that would not be
306 /// satisfactory, as errors potentially have nothing to do with the revlog.
306 /// satisfactory, as errors potentially have nothing to do with the revlog.
307 pub trait SnapshotsCache {
307 pub trait SnapshotsCache {
308 fn insert_for(
308 fn insert_for(
309 &mut self,
309 &mut self,
310 rev: BaseRevision,
310 rev: BaseRevision,
311 value: BaseRevision,
311 value: BaseRevision,
312 ) -> Result<(), RevlogError>;
312 ) -> Result<(), RevlogError>;
313 }
313 }
314
314
315 impl SnapshotsCache for FastHashMap<BaseRevision, HashSet<BaseRevision>> {
315 impl SnapshotsCache for FastHashMap<BaseRevision, HashSet<BaseRevision>> {
316 fn insert_for(
316 fn insert_for(
317 &mut self,
317 &mut self,
318 rev: BaseRevision,
318 rev: BaseRevision,
319 value: BaseRevision,
319 value: BaseRevision,
320 ) -> Result<(), RevlogError> {
320 ) -> Result<(), RevlogError> {
321 let all_values = self.entry(rev).or_insert_with(HashSet::new);
321 let all_values = self.entry(rev).or_insert_with(HashSet::new);
322 all_values.insert(value);
322 all_values.insert(value);
323 Ok(())
323 Ok(())
324 }
324 }
325 }
325 }
326
326
327 impl Index {
327 impl Index {
328 /// Create an index from bytes.
328 /// Create an index from bytes.
329 /// Calculate the start of each entry when is_inline is true.
329 /// Calculate the start of each entry when is_inline is true.
330 pub fn new(
330 pub fn new(
331 bytes: Box<dyn Deref<Target = [u8]> + Send>,
331 bytes: Box<dyn Deref<Target = [u8]> + Send + Sync>,
332 default_header: IndexHeader,
332 default_header: IndexHeader,
333 ) -> Result<Self, HgError> {
333 ) -> Result<Self, HgError> {
334 let header =
334 let header =
335 IndexHeader::parse(bytes.as_ref())?.unwrap_or(default_header);
335 IndexHeader::parse(bytes.as_ref())?.unwrap_or(default_header);
336
336
337 if header.format_version() != IndexHeader::REVLOGV1 {
337 if header.format_version() != IndexHeader::REVLOGV1 {
338 // A proper new version should have had a repo/store
338 // A proper new version should have had a repo/store
339 // requirement.
339 // requirement.
340 return Err(HgError::corrupted("unsupported revlog version"));
340 return Err(HgError::corrupted("unsupported revlog version"));
341 }
341 }
342
342
343 // This is only correct because we know version is REVLOGV1.
343 // This is only correct because we know version is REVLOGV1.
344 // In v2 we always use generaldelta, while in v0 we never use
344 // In v2 we always use generaldelta, while in v0 we never use
345 // generaldelta. Similar for [is_inline] (it's only used in v1).
345 // generaldelta. Similar for [is_inline] (it's only used in v1).
346 let uses_generaldelta = header.format_flags().uses_generaldelta();
346 let uses_generaldelta = header.format_flags().uses_generaldelta();
347
347
348 if header.format_flags().is_inline() {
348 if header.format_flags().is_inline() {
349 let mut offset: usize = 0;
349 let mut offset: usize = 0;
350 let mut offsets = Vec::new();
350 let mut offsets = Vec::new();
351
351
352 while offset + INDEX_ENTRY_SIZE <= bytes.len() {
352 while offset + INDEX_ENTRY_SIZE <= bytes.len() {
353 offsets.push(offset);
353 offsets.push(offset);
354 let end = offset + INDEX_ENTRY_SIZE;
354 let end = offset + INDEX_ENTRY_SIZE;
355 let entry = IndexEntry {
355 let entry = IndexEntry {
356 bytes: &bytes[offset..end],
356 bytes: &bytes[offset..end],
357 offset_override: None,
357 offset_override: None,
358 };
358 };
359
359
360 offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
360 offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
361 }
361 }
362
362
363 if offset == bytes.len() {
363 if offset == bytes.len() {
364 Ok(Self {
364 Ok(Self {
365 bytes: IndexData::new(bytes),
365 bytes: IndexData::new(bytes),
366 offsets: RwLock::new(Some(offsets)),
366 offsets: RwLock::new(Some(offsets)),
367 uses_generaldelta,
367 uses_generaldelta,
368 is_inline: true,
368 is_inline: true,
369 head_revs: vec![],
369 head_revs: vec![],
370 filtered_revs: HashSet::new(),
370 filtered_revs: HashSet::new(),
371 })
371 })
372 } else {
372 } else {
373 Err(HgError::corrupted("unexpected inline revlog length"))
373 Err(HgError::corrupted("unexpected inline revlog length"))
374 }
374 }
375 } else {
375 } else {
376 Ok(Self {
376 Ok(Self {
377 bytes: IndexData::new(bytes),
377 bytes: IndexData::new(bytes),
378 offsets: RwLock::new(None),
378 offsets: RwLock::new(None),
379 uses_generaldelta,
379 uses_generaldelta,
380 is_inline: false,
380 is_inline: false,
381 head_revs: vec![],
381 head_revs: vec![],
382 filtered_revs: HashSet::new(),
382 filtered_revs: HashSet::new(),
383 })
383 })
384 }
384 }
385 }
385 }
386
386
387 pub fn uses_generaldelta(&self) -> bool {
387 pub fn uses_generaldelta(&self) -> bool {
388 self.uses_generaldelta
388 self.uses_generaldelta
389 }
389 }
390
390
391 /// Value of the inline flag.
391 /// Value of the inline flag.
392 pub fn is_inline(&self) -> bool {
392 pub fn is_inline(&self) -> bool {
393 self.is_inline
393 self.is_inline
394 }
394 }
395
395
396 /// Return a slice of bytes if `revlog` is inline. Panic if not.
396 /// Return a slice of bytes if `revlog` is inline. Panic if not.
397 pub fn data(&self, start: usize, end: usize) -> &[u8] {
397 pub fn data(&self, start: usize, end: usize) -> &[u8] {
398 if !self.is_inline() {
398 if !self.is_inline() {
399 panic!("tried to access data in the index of a revlog that is not inline");
399 panic!("tried to access data in the index of a revlog that is not inline");
400 }
400 }
401 &self.bytes[start..end]
401 &self.bytes[start..end]
402 }
402 }
403
403
404 /// Return number of entries of the revlog index.
404 /// Return number of entries of the revlog index.
405 pub fn len(&self) -> usize {
405 pub fn len(&self) -> usize {
406 if let Some(offsets) = &*self.get_offsets() {
406 if let Some(offsets) = &*self.get_offsets() {
407 offsets.len()
407 offsets.len()
408 } else {
408 } else {
409 self.bytes.len() / INDEX_ENTRY_SIZE
409 self.bytes.len() / INDEX_ENTRY_SIZE
410 }
410 }
411 }
411 }
412
412
413 pub fn get_offsets(&self) -> RwLockReadGuard<Option<Vec<usize>>> {
413 pub fn get_offsets(&self) -> RwLockReadGuard<Option<Vec<usize>>> {
414 if self.is_inline() {
414 if self.is_inline() {
415 {
415 {
416 // Wrap in a block to drop the read guard
416 // Wrap in a block to drop the read guard
417 // TODO perf?
417 // TODO perf?
418 let mut offsets = self.offsets.write().unwrap();
418 let mut offsets = self.offsets.write().unwrap();
419 if offsets.is_none() {
419 if offsets.is_none() {
420 offsets.replace(inline_scan(&self.bytes.bytes).1);
420 offsets.replace(inline_scan(&self.bytes.bytes).1);
421 }
421 }
422 }
422 }
423 }
423 }
424 self.offsets.read().unwrap()
424 self.offsets.read().unwrap()
425 }
425 }
426
426
427 pub fn get_offsets_mut(&mut self) -> RwLockWriteGuard<Option<Vec<usize>>> {
427 pub fn get_offsets_mut(&mut self) -> RwLockWriteGuard<Option<Vec<usize>>> {
428 let mut offsets = self.offsets.write().unwrap();
428 let mut offsets = self.offsets.write().unwrap();
429 if self.is_inline() && offsets.is_none() {
429 if self.is_inline() && offsets.is_none() {
430 offsets.replace(inline_scan(&self.bytes.bytes).1);
430 offsets.replace(inline_scan(&self.bytes.bytes).1);
431 }
431 }
432 offsets
432 offsets
433 }
433 }
434
434
435 /// Returns `true` if the `Index` has zero `entries`.
435 /// Returns `true` if the `Index` has zero `entries`.
436 pub fn is_empty(&self) -> bool {
436 pub fn is_empty(&self) -> bool {
437 self.len() == 0
437 self.len() == 0
438 }
438 }
439
439
440 /// Return the index entry corresponding to the given revision or `None`
440 /// Return the index entry corresponding to the given revision or `None`
441 /// for [`NULL_REVISION`]
441 /// for [`NULL_REVISION`]
442 ///
442 ///
443 /// The specified revision being of the checked type, it always exists
443 /// The specified revision being of the checked type, it always exists
444 /// if it was validated by this index.
444 /// if it was validated by this index.
445 pub fn get_entry(&self, rev: Revision) -> Option<IndexEntry> {
445 pub fn get_entry(&self, rev: Revision) -> Option<IndexEntry> {
446 if rev == NULL_REVISION {
446 if rev == NULL_REVISION {
447 return None;
447 return None;
448 }
448 }
449 Some(if let Some(offsets) = &*self.get_offsets() {
449 Some(if let Some(offsets) = &*self.get_offsets() {
450 self.get_entry_inline(rev, offsets.as_ref())
450 self.get_entry_inline(rev, offsets.as_ref())
451 } else {
451 } else {
452 self.get_entry_separated(rev)
452 self.get_entry_separated(rev)
453 })
453 })
454 }
454 }
455
455
456 /// Return the binary content of the index entry for the given revision
456 /// Return the binary content of the index entry for the given revision
457 ///
457 ///
458 /// See [get_entry()](`Self::get_entry()`) for cases when `None` is
458 /// See [get_entry()](`Self::get_entry()`) for cases when `None` is
459 /// returned.
459 /// returned.
460 pub fn entry_binary(&self, rev: Revision) -> Option<&[u8]> {
460 pub fn entry_binary(&self, rev: Revision) -> Option<&[u8]> {
461 self.get_entry(rev).map(|e| {
461 self.get_entry(rev).map(|e| {
462 let bytes = e.as_bytes();
462 let bytes = e.as_bytes();
463 if rev.0 == 0 {
463 if rev.0 == 0 {
464 &bytes[4..]
464 &bytes[4..]
465 } else {
465 } else {
466 bytes
466 bytes
467 }
467 }
468 })
468 })
469 }
469 }
470
470
471 pub fn entry_as_params(
471 pub fn entry_as_params(
472 &self,
472 &self,
473 rev: UncheckedRevision,
473 rev: UncheckedRevision,
474 ) -> Option<RevisionDataParams> {
474 ) -> Option<RevisionDataParams> {
475 let rev = self.check_revision(rev)?;
475 let rev = self.check_revision(rev)?;
476 self.get_entry(rev).map(|e| RevisionDataParams {
476 self.get_entry(rev).map(|e| RevisionDataParams {
477 flags: e.flags(),
477 flags: e.flags(),
478 data_offset: if rev.0 == 0 && !self.bytes.is_new() {
478 data_offset: if rev.0 == 0 && !self.bytes.is_new() {
479 e.flags() as u64
479 e.flags() as u64
480 } else {
480 } else {
481 e.raw_offset()
481 e.raw_offset()
482 },
482 },
483 data_compressed_length: e.compressed_len().try_into().unwrap(),
483 data_compressed_length: e.compressed_len().try_into().unwrap(),
484 data_uncompressed_length: e.uncompressed_len(),
484 data_uncompressed_length: e.uncompressed_len(),
485 data_delta_base: e.base_revision_or_base_of_delta_chain().0,
485 data_delta_base: e.base_revision_or_base_of_delta_chain().0,
486 link_rev: e.link_revision().0,
486 link_rev: e.link_revision().0,
487 parent_rev_1: e.p1().0,
487 parent_rev_1: e.p1().0,
488 parent_rev_2: e.p2().0,
488 parent_rev_2: e.p2().0,
489 node_id: e.hash().as_bytes().try_into().unwrap(),
489 node_id: e.hash().as_bytes().try_into().unwrap(),
490 ..Default::default()
490 ..Default::default()
491 })
491 })
492 }
492 }
493
493
494 fn get_entry_inline(
494 fn get_entry_inline(
495 &self,
495 &self,
496 rev: Revision,
496 rev: Revision,
497 offsets: &[usize],
497 offsets: &[usize],
498 ) -> IndexEntry {
498 ) -> IndexEntry {
499 let start = offsets[rev.0 as usize];
499 let start = offsets[rev.0 as usize];
500 let end = start + INDEX_ENTRY_SIZE;
500 let end = start + INDEX_ENTRY_SIZE;
501 let bytes = &self.bytes[start..end];
501 let bytes = &self.bytes[start..end];
502
502
503 // See IndexEntry for an explanation of this override.
503 // See IndexEntry for an explanation of this override.
504 let offset_override = Some(end);
504 let offset_override = Some(end);
505
505
506 IndexEntry {
506 IndexEntry {
507 bytes,
507 bytes,
508 offset_override,
508 offset_override,
509 }
509 }
510 }
510 }
511
511
512 fn get_entry_separated(&self, rev: Revision) -> IndexEntry {
512 fn get_entry_separated(&self, rev: Revision) -> IndexEntry {
513 let start = rev.0 as usize * INDEX_ENTRY_SIZE;
513 let start = rev.0 as usize * INDEX_ENTRY_SIZE;
514 let end = start + INDEX_ENTRY_SIZE;
514 let end = start + INDEX_ENTRY_SIZE;
515 let bytes = &self.bytes[start..end];
515 let bytes = &self.bytes[start..end];
516
516
517 // Override the offset of the first revision as its bytes are used
517 // Override the offset of the first revision as its bytes are used
518 // for the index's metadata (saving space because it is always 0)
518 // for the index's metadata (saving space because it is always 0)
519 let offset_override = if rev == Revision(0) { Some(0) } else { None };
519 let offset_override = if rev == Revision(0) { Some(0) } else { None };
520
520
521 IndexEntry {
521 IndexEntry {
522 bytes,
522 bytes,
523 offset_override,
523 offset_override,
524 }
524 }
525 }
525 }
526
526
527 fn null_entry(&self) -> IndexEntry {
527 fn null_entry(&self) -> IndexEntry {
528 IndexEntry {
528 IndexEntry {
529 bytes: &[0; INDEX_ENTRY_SIZE],
529 bytes: &[0; INDEX_ENTRY_SIZE],
530 offset_override: Some(0),
530 offset_override: Some(0),
531 }
531 }
532 }
532 }
533
533
534 /// Return the head revisions of this index
534 /// Return the head revisions of this index
535 pub fn head_revs(&mut self) -> Result<Vec<Revision>, GraphError> {
535 pub fn head_revs(&mut self) -> Result<Vec<Revision>, GraphError> {
536 self.head_revs_filtered(&HashSet::new())
536 self.head_revs_filtered(&HashSet::new())
537 }
537 }
538
538
539 /// Return the head revisions of this index
539 /// Return the head revisions of this index
540 pub fn head_revs_filtered(
540 pub fn head_revs_filtered(
541 &mut self,
541 &mut self,
542 filtered_revs: &HashSet<Revision>,
542 filtered_revs: &HashSet<Revision>,
543 ) -> Result<Vec<Revision>, GraphError> {
543 ) -> Result<Vec<Revision>, GraphError> {
544 if !self.head_revs.is_empty() && filtered_revs == &self.filtered_revs {
544 if !self.head_revs.is_empty() && filtered_revs == &self.filtered_revs {
545 return Ok(self.head_revs.to_owned());
545 return Ok(self.head_revs.to_owned());
546 }
546 }
547 let mut revs: HashSet<Revision, RandomState> =
547 let mut revs: HashSet<Revision, RandomState> =
548 if filtered_revs.is_empty() {
548 if filtered_revs.is_empty() {
549 (0..self.len())
549 (0..self.len())
550 .into_iter()
550 .into_iter()
551 .map(|i| Revision(i as BaseRevision))
551 .map(|i| Revision(i as BaseRevision))
552 .collect()
552 .collect()
553 } else {
553 } else {
554 (0..self.len())
554 (0..self.len())
555 .into_iter()
555 .into_iter()
556 .filter_map(|i| {
556 .filter_map(|i| {
557 let r = Revision(i as BaseRevision);
557 let r = Revision(i as BaseRevision);
558 if filtered_revs.contains(&r) {
558 if filtered_revs.contains(&r) {
559 None
559 None
560 } else {
560 } else {
561 Some(r)
561 Some(r)
562 }
562 }
563 })
563 })
564 .collect()
564 .collect()
565 };
565 };
566 dagops::retain_heads(self, &mut revs)?;
566 dagops::retain_heads(self, &mut revs)?;
567 if self.is_empty() {
567 if self.is_empty() {
568 revs.insert(NULL_REVISION);
568 revs.insert(NULL_REVISION);
569 }
569 }
570 let mut as_vec: Vec<Revision> =
570 let mut as_vec: Vec<Revision> =
571 revs.into_iter().map(Into::into).collect();
571 revs.into_iter().map(Into::into).collect();
572 as_vec.sort_unstable();
572 as_vec.sort_unstable();
573 self.head_revs = as_vec.to_owned();
573 self.head_revs = as_vec.to_owned();
574 self.filtered_revs = filtered_revs.to_owned();
574 self.filtered_revs = filtered_revs.to_owned();
575 Ok(as_vec)
575 Ok(as_vec)
576 }
576 }
577
577
578 /// Obtain the delta chain for a revision.
578 /// Obtain the delta chain for a revision.
579 ///
579 ///
580 /// `stop_rev` specifies a revision to stop at. If not specified, we
580 /// `stop_rev` specifies a revision to stop at. If not specified, we
581 /// stop at the base of the chain.
581 /// stop at the base of the chain.
582 ///
582 ///
583 /// Returns a 2-tuple of (chain, stopped) where `chain` is a vec of
583 /// Returns a 2-tuple of (chain, stopped) where `chain` is a vec of
584 /// revs in ascending order and `stopped` is a bool indicating whether
584 /// revs in ascending order and `stopped` is a bool indicating whether
585 /// `stoprev` was hit.
585 /// `stoprev` was hit.
586 pub fn delta_chain(
586 pub fn delta_chain(
587 &self,
587 &self,
588 rev: Revision,
588 rev: Revision,
589 stop_rev: Option<Revision>,
589 stop_rev: Option<Revision>,
590 ) -> Result<(Vec<Revision>, bool), HgError> {
590 ) -> Result<(Vec<Revision>, bool), HgError> {
591 let mut current_rev = rev;
591 let mut current_rev = rev;
592 let mut entry = self.get_entry(rev).unwrap();
592 let mut entry = self.get_entry(rev).unwrap();
593 let mut chain = vec![];
593 let mut chain = vec![];
594 while current_rev.0 != entry.base_revision_or_base_of_delta_chain().0
594 while current_rev.0 != entry.base_revision_or_base_of_delta_chain().0
595 && stop_rev.map(|r| r != current_rev).unwrap_or(true)
595 && stop_rev.map(|r| r != current_rev).unwrap_or(true)
596 {
596 {
597 chain.push(current_rev);
597 chain.push(current_rev);
598 let new_rev = if self.uses_generaldelta() {
598 let new_rev = if self.uses_generaldelta() {
599 entry.base_revision_or_base_of_delta_chain()
599 entry.base_revision_or_base_of_delta_chain()
600 } else {
600 } else {
601 UncheckedRevision(current_rev.0 - 1)
601 UncheckedRevision(current_rev.0 - 1)
602 };
602 };
603 current_rev = self.check_revision(new_rev).ok_or_else(|| {
603 current_rev = self.check_revision(new_rev).ok_or_else(|| {
604 HgError::corrupted(format!("Revision {new_rev} out of range"))
604 HgError::corrupted(format!("Revision {new_rev} out of range"))
605 })?;
605 })?;
606 if current_rev.0 == NULL_REVISION.0 {
606 if current_rev.0 == NULL_REVISION.0 {
607 break;
607 break;
608 }
608 }
609 entry = self.get_entry(current_rev).unwrap()
609 entry = self.get_entry(current_rev).unwrap()
610 }
610 }
611
611
612 let stopped = if stop_rev.map(|r| current_rev == r).unwrap_or(false) {
612 let stopped = if stop_rev.map(|r| current_rev == r).unwrap_or(false) {
613 true
613 true
614 } else {
614 } else {
615 chain.push(current_rev);
615 chain.push(current_rev);
616 false
616 false
617 };
617 };
618 chain.reverse();
618 chain.reverse();
619 Ok((chain, stopped))
619 Ok((chain, stopped))
620 }
620 }
621
621
622 pub fn find_snapshots(
622 pub fn find_snapshots(
623 &self,
623 &self,
624 start_rev: UncheckedRevision,
624 start_rev: UncheckedRevision,
625 end_rev: UncheckedRevision,
625 end_rev: UncheckedRevision,
626 cache: &mut impl SnapshotsCache,
626 cache: &mut impl SnapshotsCache,
627 ) -> Result<(), RevlogError> {
627 ) -> Result<(), RevlogError> {
628 let mut start_rev = start_rev.0;
628 let mut start_rev = start_rev.0;
629 let mut end_rev = end_rev.0;
629 let mut end_rev = end_rev.0;
630 end_rev += 1;
630 end_rev += 1;
631 let len = self.len().try_into().unwrap();
631 let len = self.len().try_into().unwrap();
632 if end_rev > len {
632 if end_rev > len {
633 end_rev = len;
633 end_rev = len;
634 }
634 }
635 if start_rev < 0 {
635 if start_rev < 0 {
636 start_rev = 0;
636 start_rev = 0;
637 }
637 }
638 for rev in start_rev..end_rev {
638 for rev in start_rev..end_rev {
639 if !self.is_snapshot_unchecked(Revision(rev))? {
639 if !self.is_snapshot_unchecked(Revision(rev))? {
640 continue;
640 continue;
641 }
641 }
642 let mut base = self
642 let mut base = self
643 .get_entry(Revision(rev))
643 .get_entry(Revision(rev))
644 .unwrap()
644 .unwrap()
645 .base_revision_or_base_of_delta_chain();
645 .base_revision_or_base_of_delta_chain();
646 if base.0 == rev {
646 if base.0 == rev {
647 base = NULL_REVISION.into();
647 base = NULL_REVISION.into();
648 }
648 }
649 cache.insert_for(base.0, rev)?;
649 cache.insert_for(base.0, rev)?;
650 }
650 }
651 Ok(())
651 Ok(())
652 }
652 }
653
653
654 /// TODO move this to the trait probably, along with other things
654 /// TODO move this to the trait probably, along with other things
655 pub fn append(
655 pub fn append(
656 &mut self,
656 &mut self,
657 revision_data: RevisionDataParams,
657 revision_data: RevisionDataParams,
658 ) -> Result<(), RevlogError> {
658 ) -> Result<(), RevlogError> {
659 revision_data.validate()?;
659 revision_data.validate()?;
660 let new_offset = self.bytes.len();
660 let new_offset = self.bytes.len();
661 if let Some(offsets) = &mut *self.get_offsets_mut() {
661 if let Some(offsets) = &mut *self.get_offsets_mut() {
662 offsets.push(new_offset)
662 offsets.push(new_offset)
663 }
663 }
664 self.bytes.added.extend(revision_data.into_v1().as_bytes());
664 self.bytes.added.extend(revision_data.into_v1().as_bytes());
665 self.head_revs.clear();
665 self.head_revs.clear();
666 Ok(())
666 Ok(())
667 }
667 }
668
668
669 pub fn pack_header(&self, header: i32) -> [u8; 4] {
669 pub fn pack_header(&self, header: i32) -> [u8; 4] {
670 header.to_be_bytes()
670 header.to_be_bytes()
671 }
671 }
672
672
673 pub fn remove(&mut self, rev: Revision) -> Result<(), RevlogError> {
673 pub fn remove(&mut self, rev: Revision) -> Result<(), RevlogError> {
674 let offsets = self.get_offsets().clone();
674 let offsets = self.get_offsets().clone();
675 self.bytes.remove(rev, offsets.as_deref())?;
675 self.bytes.remove(rev, offsets.as_deref())?;
676 if let Some(offsets) = &mut *self.get_offsets_mut() {
676 if let Some(offsets) = &mut *self.get_offsets_mut() {
677 offsets.truncate(rev.0 as usize)
677 offsets.truncate(rev.0 as usize)
678 }
678 }
679 self.head_revs.clear();
679 self.head_revs.clear();
680 Ok(())
680 Ok(())
681 }
681 }
682
682
683 pub fn clear_caches(&mut self) {
683 pub fn clear_caches(&mut self) {
684 // We need to get the 'inline' value from Python at init and use this
684 // We need to get the 'inline' value from Python at init and use this
685 // instead of offsets to determine whether we're inline since we might
685 // instead of offsets to determine whether we're inline since we might
686 // clear caches. This implies re-populating the offsets on-demand.
686 // clear caches. This implies re-populating the offsets on-demand.
687 self.offsets = RwLock::new(None);
687 self.offsets = RwLock::new(None);
688 self.head_revs.clear();
688 self.head_revs.clear();
689 }
689 }
690
690
691 /// Unchecked version of `is_snapshot`.
691 /// Unchecked version of `is_snapshot`.
692 /// Assumes the caller checked that `rev` is within a valid revision range.
692 /// Assumes the caller checked that `rev` is within a valid revision range.
693 pub fn is_snapshot_unchecked(
693 pub fn is_snapshot_unchecked(
694 &self,
694 &self,
695 mut rev: Revision,
695 mut rev: Revision,
696 ) -> Result<bool, RevlogError> {
696 ) -> Result<bool, RevlogError> {
697 while rev.0 >= 0 {
697 while rev.0 >= 0 {
698 let entry = self.get_entry(rev).unwrap();
698 let entry = self.get_entry(rev).unwrap();
699 let mut base = entry.base_revision_or_base_of_delta_chain().0;
699 let mut base = entry.base_revision_or_base_of_delta_chain().0;
700 if base == rev.0 {
700 if base == rev.0 {
701 base = NULL_REVISION.0;
701 base = NULL_REVISION.0;
702 }
702 }
703 if base == NULL_REVISION.0 {
703 if base == NULL_REVISION.0 {
704 return Ok(true);
704 return Ok(true);
705 }
705 }
706 let [mut p1, mut p2] = self
706 let [mut p1, mut p2] = self
707 .parents(rev)
707 .parents(rev)
708 .map_err(|_| RevlogError::InvalidRevision)?;
708 .map_err(|_| RevlogError::InvalidRevision)?;
709 while let Some(p1_entry) = self.get_entry(p1) {
709 while let Some(p1_entry) = self.get_entry(p1) {
710 if p1_entry.compressed_len() != 0 || p1.0 == 0 {
710 if p1_entry.compressed_len() != 0 || p1.0 == 0 {
711 break;
711 break;
712 }
712 }
713 let parent_base =
713 let parent_base =
714 p1_entry.base_revision_or_base_of_delta_chain();
714 p1_entry.base_revision_or_base_of_delta_chain();
715 if parent_base.0 == p1.0 {
715 if parent_base.0 == p1.0 {
716 break;
716 break;
717 }
717 }
718 p1 = self
718 p1 = self
719 .check_revision(parent_base)
719 .check_revision(parent_base)
720 .ok_or(RevlogError::InvalidRevision)?;
720 .ok_or(RevlogError::InvalidRevision)?;
721 }
721 }
722 while let Some(p2_entry) = self.get_entry(p2) {
722 while let Some(p2_entry) = self.get_entry(p2) {
723 if p2_entry.compressed_len() != 0 || p2.0 == 0 {
723 if p2_entry.compressed_len() != 0 || p2.0 == 0 {
724 break;
724 break;
725 }
725 }
726 let parent_base =
726 let parent_base =
727 p2_entry.base_revision_or_base_of_delta_chain();
727 p2_entry.base_revision_or_base_of_delta_chain();
728 if parent_base.0 == p2.0 {
728 if parent_base.0 == p2.0 {
729 break;
729 break;
730 }
730 }
731 p2 = self
731 p2 = self
732 .check_revision(parent_base)
732 .check_revision(parent_base)
733 .ok_or(RevlogError::InvalidRevision)?;
733 .ok_or(RevlogError::InvalidRevision)?;
734 }
734 }
735 if base == p1.0 || base == p2.0 {
735 if base == p1.0 || base == p2.0 {
736 return Ok(false);
736 return Ok(false);
737 }
737 }
738 rev = self
738 rev = self
739 .check_revision(base.into())
739 .check_revision(base.into())
740 .ok_or(RevlogError::InvalidRevision)?;
740 .ok_or(RevlogError::InvalidRevision)?;
741 }
741 }
742 Ok(rev == NULL_REVISION)
742 Ok(rev == NULL_REVISION)
743 }
743 }
744
744
745 /// Return whether the given revision is a snapshot. Returns an error if
745 /// Return whether the given revision is a snapshot. Returns an error if
746 /// `rev` is not within a valid revision range.
746 /// `rev` is not within a valid revision range.
747 pub fn is_snapshot(
747 pub fn is_snapshot(
748 &self,
748 &self,
749 rev: UncheckedRevision,
749 rev: UncheckedRevision,
750 ) -> Result<bool, RevlogError> {
750 ) -> Result<bool, RevlogError> {
751 let rev = self
751 let rev = self
752 .check_revision(rev)
752 .check_revision(rev)
753 .ok_or_else(|| RevlogError::corrupted("test"))?;
753 .ok_or_else(|| RevlogError::corrupted("test"))?;
754 self.is_snapshot_unchecked(rev)
754 self.is_snapshot_unchecked(rev)
755 }
755 }
756
756
757 /// Slice revs to reduce the amount of unrelated data to be read from disk.
757 /// Slice revs to reduce the amount of unrelated data to be read from disk.
758 ///
758 ///
759 /// The index is sliced into groups that should be read in one time.
759 /// The index is sliced into groups that should be read in one time.
760 ///
760 ///
761 /// The initial chunk is sliced until the overall density
761 /// The initial chunk is sliced until the overall density
762 /// (payload/chunks-span ratio) is above `target_density`.
762 /// (payload/chunks-span ratio) is above `target_density`.
763 /// No gap smaller than `min_gap_size` is skipped.
763 /// No gap smaller than `min_gap_size` is skipped.
764 pub fn slice_chunk_to_density(
764 pub fn slice_chunk_to_density(
765 &self,
765 &self,
766 revs: &[Revision],
766 revs: &[Revision],
767 target_density: f64,
767 target_density: f64,
768 min_gap_size: usize,
768 min_gap_size: usize,
769 ) -> Vec<Vec<Revision>> {
769 ) -> Vec<Vec<Revision>> {
770 if revs.is_empty() {
770 if revs.is_empty() {
771 return vec![];
771 return vec![];
772 }
772 }
773 if revs.len() == 1 {
773 if revs.len() == 1 {
774 return vec![revs.to_owned()];
774 return vec![revs.to_owned()];
775 }
775 }
776 let delta_chain_span = self.segment_span(revs);
776 let delta_chain_span = self.segment_span(revs);
777 if delta_chain_span < min_gap_size {
777 if delta_chain_span < min_gap_size {
778 return vec![revs.to_owned()];
778 return vec![revs.to_owned()];
779 }
779 }
780 let entries: Vec<_> = revs
780 let entries: Vec<_> = revs
781 .iter()
781 .iter()
782 .map(|r| {
782 .map(|r| {
783 (*r, self.get_entry(*r).unwrap_or_else(|| self.null_entry()))
783 (*r, self.get_entry(*r).unwrap_or_else(|| self.null_entry()))
784 })
784 })
785 .collect();
785 .collect();
786
786
787 let mut read_data = delta_chain_span;
787 let mut read_data = delta_chain_span;
788 let chain_payload: u32 =
788 let chain_payload: u32 =
789 entries.iter().map(|(_r, e)| e.compressed_len()).sum();
789 entries.iter().map(|(_r, e)| e.compressed_len()).sum();
790 let mut density = if delta_chain_span > 0 {
790 let mut density = if delta_chain_span > 0 {
791 chain_payload as f64 / delta_chain_span as f64
791 chain_payload as f64 / delta_chain_span as f64
792 } else {
792 } else {
793 1.0
793 1.0
794 };
794 };
795
795
796 if density >= target_density {
796 if density >= target_density {
797 return vec![revs.to_owned()];
797 return vec![revs.to_owned()];
798 }
798 }
799
799
800 // Store the gaps in a heap to have them sorted by decreasing size
800 // Store the gaps in a heap to have them sorted by decreasing size
801 let mut gaps = Vec::new();
801 let mut gaps = Vec::new();
802 let mut previous_end = None;
802 let mut previous_end = None;
803
803
804 for (i, (_rev, entry)) in entries.iter().enumerate() {
804 for (i, (_rev, entry)) in entries.iter().enumerate() {
805 let start = entry.c_start() as usize;
805 let start = entry.c_start() as usize;
806 let length = entry.compressed_len();
806 let length = entry.compressed_len();
807
807
808 // Skip empty revisions to form larger holes
808 // Skip empty revisions to form larger holes
809 if length == 0 {
809 if length == 0 {
810 continue;
810 continue;
811 }
811 }
812
812
813 if let Some(end) = previous_end {
813 if let Some(end) = previous_end {
814 let gap_size = start - end;
814 let gap_size = start - end;
815 // Only consider holes that are large enough
815 // Only consider holes that are large enough
816 if gap_size > min_gap_size {
816 if gap_size > min_gap_size {
817 gaps.push((gap_size, i));
817 gaps.push((gap_size, i));
818 }
818 }
819 }
819 }
820 previous_end = Some(start + length as usize);
820 previous_end = Some(start + length as usize);
821 }
821 }
822 if gaps.is_empty() {
822 if gaps.is_empty() {
823 return vec![revs.to_owned()];
823 return vec![revs.to_owned()];
824 }
824 }
825 // sort the gaps to pop them from largest to small
825 // sort the gaps to pop them from largest to small
826 gaps.sort_unstable();
826 gaps.sort_unstable();
827
827
828 // Collect the indices of the largest holes until
828 // Collect the indices of the largest holes until
829 // the density is acceptable
829 // the density is acceptable
830 let mut selected = vec![];
830 let mut selected = vec![];
831 while let Some((gap_size, gap_id)) = gaps.pop() {
831 while let Some((gap_size, gap_id)) = gaps.pop() {
832 if density >= target_density {
832 if density >= target_density {
833 break;
833 break;
834 }
834 }
835 selected.push(gap_id);
835 selected.push(gap_id);
836
836
837 // The gap sizes are stored as negatives to be sorted decreasingly
837 // The gap sizes are stored as negatives to be sorted decreasingly
838 // by the heap
838 // by the heap
839 read_data -= gap_size;
839 read_data -= gap_size;
840 density = if read_data > 0 {
840 density = if read_data > 0 {
841 chain_payload as f64 / read_data as f64
841 chain_payload as f64 / read_data as f64
842 } else {
842 } else {
843 1.0
843 1.0
844 };
844 };
845 if density >= target_density {
845 if density >= target_density {
846 break;
846 break;
847 }
847 }
848 }
848 }
849 selected.sort_unstable();
849 selected.sort_unstable();
850 selected.push(revs.len());
850 selected.push(revs.len());
851
851
852 // Cut the revs at collected indices
852 // Cut the revs at collected indices
853 let mut previous_idx = 0;
853 let mut previous_idx = 0;
854 let mut chunks = vec![];
854 let mut chunks = vec![];
855 for idx in selected {
855 for idx in selected {
856 let chunk = self.trim_chunk(&entries, previous_idx, idx);
856 let chunk = self.trim_chunk(&entries, previous_idx, idx);
857 if !chunk.is_empty() {
857 if !chunk.is_empty() {
858 chunks.push(chunk.iter().map(|(rev, _entry)| *rev).collect());
858 chunks.push(chunk.iter().map(|(rev, _entry)| *rev).collect());
859 }
859 }
860 previous_idx = idx;
860 previous_idx = idx;
861 }
861 }
862 let chunk = self.trim_chunk(&entries, previous_idx, entries.len());
862 let chunk = self.trim_chunk(&entries, previous_idx, entries.len());
863 if !chunk.is_empty() {
863 if !chunk.is_empty() {
864 chunks.push(chunk.iter().map(|(rev, _entry)| *rev).collect());
864 chunks.push(chunk.iter().map(|(rev, _entry)| *rev).collect());
865 }
865 }
866
866
867 chunks
867 chunks
868 }
868 }
869
869
870 /// Get the byte span of a segment of sorted revisions.
870 /// Get the byte span of a segment of sorted revisions.
871 ///
871 ///
872 /// Occurrences of [`NULL_REVISION`] are ignored at the beginning of
872 /// Occurrences of [`NULL_REVISION`] are ignored at the beginning of
873 /// the `revs` segment.
873 /// the `revs` segment.
874 ///
874 ///
875 /// panics:
875 /// panics:
876 /// - if `revs` is empty or only made of `NULL_REVISION`
876 /// - if `revs` is empty or only made of `NULL_REVISION`
877 /// - if cannot retrieve entry for the last or first not null element of
877 /// - if cannot retrieve entry for the last or first not null element of
878 /// `revs`.
878 /// `revs`.
879 fn segment_span(&self, revs: &[Revision]) -> usize {
879 fn segment_span(&self, revs: &[Revision]) -> usize {
880 if revs.is_empty() {
880 if revs.is_empty() {
881 return 0;
881 return 0;
882 }
882 }
883 let last_entry = &self.get_entry(revs[revs.len() - 1]).unwrap();
883 let last_entry = &self.get_entry(revs[revs.len() - 1]).unwrap();
884 let end = last_entry.c_start() + last_entry.compressed_len() as u64;
884 let end = last_entry.c_start() + last_entry.compressed_len() as u64;
885 let first_rev = revs.iter().find(|r| r.0 != NULL_REVISION.0).unwrap();
885 let first_rev = revs.iter().find(|r| r.0 != NULL_REVISION.0).unwrap();
886 let start = if (*first_rev).0 == 0 {
886 let start = if (*first_rev).0 == 0 {
887 0
887 0
888 } else {
888 } else {
889 self.get_entry(*first_rev).unwrap().c_start()
889 self.get_entry(*first_rev).unwrap().c_start()
890 };
890 };
891 (end - start) as usize
891 (end - start) as usize
892 }
892 }
893
893
894 /// Returns `&revs[startidx..endidx]` without empty trailing revs
894 /// Returns `&revs[startidx..endidx]` without empty trailing revs
895 fn trim_chunk<'a>(
895 fn trim_chunk<'a>(
896 &'a self,
896 &'a self,
897 revs: &'a [(Revision, IndexEntry)],
897 revs: &'a [(Revision, IndexEntry)],
898 start: usize,
898 start: usize,
899 mut end: usize,
899 mut end: usize,
900 ) -> &'a [(Revision, IndexEntry)] {
900 ) -> &'a [(Revision, IndexEntry)] {
901 // Trim empty revs at the end, except the very first rev of a chain
901 // Trim empty revs at the end, except the very first rev of a chain
902 let last_rev = revs[end - 1].0;
902 let last_rev = revs[end - 1].0;
903 if last_rev.0 < self.len() as BaseRevision {
903 if last_rev.0 < self.len() as BaseRevision {
904 while end > 1
904 while end > 1
905 && end > start
905 && end > start
906 && revs[end - 1].1.compressed_len() == 0
906 && revs[end - 1].1.compressed_len() == 0
907 {
907 {
908 end -= 1
908 end -= 1
909 }
909 }
910 }
910 }
911 &revs[start..end]
911 &revs[start..end]
912 }
912 }
913
913
914 /// Computes the set of revisions for each non-public phase from `roots`,
914 /// Computes the set of revisions for each non-public phase from `roots`,
915 /// which are the last known roots for each non-public phase.
915 /// which are the last known roots for each non-public phase.
916 pub fn compute_phases_map_sets(
916 pub fn compute_phases_map_sets(
917 &self,
917 &self,
918 roots: HashMap<Phase, Vec<Revision>>,
918 roots: HashMap<Phase, Vec<Revision>>,
919 ) -> Result<(usize, RootsPerPhase), GraphError> {
919 ) -> Result<(usize, RootsPerPhase), GraphError> {
920 let mut phases = HashMap::new();
920 let mut phases = HashMap::new();
921 let mut min_phase_rev = NULL_REVISION;
921 let mut min_phase_rev = NULL_REVISION;
922
922
923 for phase in Phase::non_public_phases() {
923 for phase in Phase::non_public_phases() {
924 if let Some(phase_roots) = roots.get(phase) {
924 if let Some(phase_roots) = roots.get(phase) {
925 let min_rev =
925 let min_rev =
926 self.add_roots_get_min(phase_roots, &mut phases, *phase);
926 self.add_roots_get_min(phase_roots, &mut phases, *phase);
927 if min_rev != NULL_REVISION
927 if min_rev != NULL_REVISION
928 && (min_phase_rev == NULL_REVISION
928 && (min_phase_rev == NULL_REVISION
929 || min_rev < min_phase_rev)
929 || min_rev < min_phase_rev)
930 {
930 {
931 min_phase_rev = min_rev;
931 min_phase_rev = min_rev;
932 }
932 }
933 } else {
933 } else {
934 continue;
934 continue;
935 };
935 };
936 }
936 }
937 let mut phase_sets: RootsPerPhase = Default::default();
937 let mut phase_sets: RootsPerPhase = Default::default();
938
938
939 if min_phase_rev == NULL_REVISION {
939 if min_phase_rev == NULL_REVISION {
940 min_phase_rev = Revision(self.len() as BaseRevision);
940 min_phase_rev = Revision(self.len() as BaseRevision);
941 }
941 }
942
942
943 for rev in min_phase_rev.0..self.len() as BaseRevision {
943 for rev in min_phase_rev.0..self.len() as BaseRevision {
944 let rev = Revision(rev);
944 let rev = Revision(rev);
945 let [p1, p2] = self.parents(rev)?;
945 let [p1, p2] = self.parents(rev)?;
946
946
947 const DEFAULT_PHASE: &Phase = &Phase::Public;
947 const DEFAULT_PHASE: &Phase = &Phase::Public;
948 if p1.0 >= 0
948 if p1.0 >= 0
949 && phases.get(&p1).unwrap_or(DEFAULT_PHASE)
949 && phases.get(&p1).unwrap_or(DEFAULT_PHASE)
950 > phases.get(&rev).unwrap_or(DEFAULT_PHASE)
950 > phases.get(&rev).unwrap_or(DEFAULT_PHASE)
951 {
951 {
952 phases.insert(rev, phases[&p1]);
952 phases.insert(rev, phases[&p1]);
953 }
953 }
954 if p2.0 >= 0
954 if p2.0 >= 0
955 && phases.get(&p2).unwrap_or(DEFAULT_PHASE)
955 && phases.get(&p2).unwrap_or(DEFAULT_PHASE)
956 > phases.get(&rev).unwrap_or(DEFAULT_PHASE)
956 > phases.get(&rev).unwrap_or(DEFAULT_PHASE)
957 {
957 {
958 phases.insert(rev, phases[&p2]);
958 phases.insert(rev, phases[&p2]);
959 }
959 }
960 let set = match phases.get(&rev).unwrap_or(DEFAULT_PHASE) {
960 let set = match phases.get(&rev).unwrap_or(DEFAULT_PHASE) {
961 Phase::Public => continue,
961 Phase::Public => continue,
962 phase => &mut phase_sets[*phase as usize - 1],
962 phase => &mut phase_sets[*phase as usize - 1],
963 };
963 };
964 set.insert(rev);
964 set.insert(rev);
965 }
965 }
966
966
967 Ok((self.len(), phase_sets))
967 Ok((self.len(), phase_sets))
968 }
968 }
969
969
970 fn add_roots_get_min(
970 fn add_roots_get_min(
971 &self,
971 &self,
972 phase_roots: &[Revision],
972 phase_roots: &[Revision],
973 phases: &mut HashMap<Revision, Phase>,
973 phases: &mut HashMap<Revision, Phase>,
974 phase: Phase,
974 phase: Phase,
975 ) -> Revision {
975 ) -> Revision {
976 let mut min_rev = NULL_REVISION;
976 let mut min_rev = NULL_REVISION;
977
977
978 for root in phase_roots {
978 for root in phase_roots {
979 phases.insert(*root, phase);
979 phases.insert(*root, phase);
980 if min_rev == NULL_REVISION || min_rev > *root {
980 if min_rev == NULL_REVISION || min_rev > *root {
981 min_rev = *root;
981 min_rev = *root;
982 }
982 }
983 }
983 }
984 min_rev
984 min_rev
985 }
985 }
986
986
987 /// Return `(heads(::(<roots> and <roots>::<heads>)))`
987 /// Return `(heads(::(<roots> and <roots>::<heads>)))`
988 /// If `include_path` is `true`, return `(<roots>::<heads>)`."""
988 /// If `include_path` is `true`, return `(<roots>::<heads>)`."""
989 ///
989 ///
990 /// `min_root` and `roots` are unchecked since they are just used as
990 /// `min_root` and `roots` are unchecked since they are just used as
991 /// a bound or for comparison and don't need to represent a valid revision.
991 /// a bound or for comparison and don't need to represent a valid revision.
992 /// In practice, the only invalid revision passed is the working directory
992 /// In practice, the only invalid revision passed is the working directory
993 /// revision ([`i32::MAX`]).
993 /// revision ([`i32::MAX`]).
994 pub fn reachable_roots(
994 pub fn reachable_roots(
995 &self,
995 &self,
996 min_root: UncheckedRevision,
996 min_root: UncheckedRevision,
997 mut heads: Vec<Revision>,
997 mut heads: Vec<Revision>,
998 roots: HashSet<UncheckedRevision>,
998 roots: HashSet<UncheckedRevision>,
999 include_path: bool,
999 include_path: bool,
1000 ) -> Result<HashSet<Revision>, GraphError> {
1000 ) -> Result<HashSet<Revision>, GraphError> {
1001 if roots.is_empty() {
1001 if roots.is_empty() {
1002 return Ok(HashSet::new());
1002 return Ok(HashSet::new());
1003 }
1003 }
1004 let mut reachable = HashSet::new();
1004 let mut reachable = HashSet::new();
1005 let mut seen = HashMap::new();
1005 let mut seen = HashMap::new();
1006
1006
1007 while let Some(rev) = heads.pop() {
1007 while let Some(rev) = heads.pop() {
1008 if roots.contains(&rev.into()) {
1008 if roots.contains(&rev.into()) {
1009 reachable.insert(rev);
1009 reachable.insert(rev);
1010 if !include_path {
1010 if !include_path {
1011 continue;
1011 continue;
1012 }
1012 }
1013 }
1013 }
1014 let parents = self.parents(rev)?;
1014 let parents = self.parents(rev)?;
1015 seen.insert(rev, parents);
1015 seen.insert(rev, parents);
1016 for parent in parents {
1016 for parent in parents {
1017 if parent.0 >= min_root.0 && !seen.contains_key(&parent) {
1017 if parent.0 >= min_root.0 && !seen.contains_key(&parent) {
1018 heads.push(parent);
1018 heads.push(parent);
1019 }
1019 }
1020 }
1020 }
1021 }
1021 }
1022 if !include_path {
1022 if !include_path {
1023 return Ok(reachable);
1023 return Ok(reachable);
1024 }
1024 }
1025 let mut revs: Vec<_> = seen.keys().collect();
1025 let mut revs: Vec<_> = seen.keys().collect();
1026 revs.sort_unstable();
1026 revs.sort_unstable();
1027 for rev in revs {
1027 for rev in revs {
1028 for parent in seen[rev] {
1028 for parent in seen[rev] {
1029 if reachable.contains(&parent) {
1029 if reachable.contains(&parent) {
1030 reachable.insert(*rev);
1030 reachable.insert(*rev);
1031 }
1031 }
1032 }
1032 }
1033 }
1033 }
1034 Ok(reachable)
1034 Ok(reachable)
1035 }
1035 }
1036
1036
1037 /// Given a (possibly overlapping) set of revs, return all the
1037 /// Given a (possibly overlapping) set of revs, return all the
1038 /// common ancestors heads: `heads(::args[0] and ::a[1] and ...)`
1038 /// common ancestors heads: `heads(::args[0] and ::a[1] and ...)`
1039 pub fn common_ancestor_heads(
1039 pub fn common_ancestor_heads(
1040 &self,
1040 &self,
1041 revisions: &[Revision],
1041 revisions: &[Revision],
1042 ) -> Result<Vec<Revision>, GraphError> {
1042 ) -> Result<Vec<Revision>, GraphError> {
1043 // given that revisions is expected to be small, we find this shortcut
1043 // given that revisions is expected to be small, we find this shortcut
1044 // potentially acceptable, especially given that `hg-cpython` could
1044 // potentially acceptable, especially given that `hg-cpython` could
1045 // very much bypass this, constructing a vector of unique values from
1045 // very much bypass this, constructing a vector of unique values from
1046 // the onset.
1046 // the onset.
1047 let as_set: HashSet<Revision> = revisions.iter().copied().collect();
1047 let as_set: HashSet<Revision> = revisions.iter().copied().collect();
1048 // Besides deduplicating, the C version also implements the shortcut
1048 // Besides deduplicating, the C version also implements the shortcut
1049 // for `NULL_REVISION`:
1049 // for `NULL_REVISION`:
1050 if as_set.contains(&NULL_REVISION) {
1050 if as_set.contains(&NULL_REVISION) {
1051 return Ok(vec![]);
1051 return Ok(vec![]);
1052 }
1052 }
1053
1053
1054 let revisions: Vec<Revision> = as_set.into_iter().collect();
1054 let revisions: Vec<Revision> = as_set.into_iter().collect();
1055
1055
1056 if revisions.len() < 8 {
1056 if revisions.len() < 8 {
1057 self.find_gca_candidates::<u8>(&revisions)
1057 self.find_gca_candidates::<u8>(&revisions)
1058 } else if revisions.len() < 64 {
1058 } else if revisions.len() < 64 {
1059 self.find_gca_candidates::<u64>(&revisions)
1059 self.find_gca_candidates::<u64>(&revisions)
1060 } else {
1060 } else {
1061 self.find_gca_candidates::<NonStaticPoisonableBitSet>(&revisions)
1061 self.find_gca_candidates::<NonStaticPoisonableBitSet>(&revisions)
1062 }
1062 }
1063 }
1063 }
1064
1064
1065 pub fn ancestors(
1065 pub fn ancestors(
1066 &self,
1066 &self,
1067 revisions: &[Revision],
1067 revisions: &[Revision],
1068 ) -> Result<Vec<Revision>, GraphError> {
1068 ) -> Result<Vec<Revision>, GraphError> {
1069 self.find_deepest_revs(&self.common_ancestor_heads(revisions)?)
1069 self.find_deepest_revs(&self.common_ancestor_heads(revisions)?)
1070 }
1070 }
1071
1071
    /// Given a disjoint set of revs, return all candidates for the
    /// greatest common ancestor. In revset notation, this is the set
    /// `heads(::a and ::b and ...)`
    ///
    /// `BS` selects the bit set representation (a `Copy` integer for small
    /// input sets, a heap-backed set otherwise); errors from parent lookup
    /// are propagated as [`GraphError`].
    fn find_gca_candidates<BS: PoisonableBitSet + Clone>(
        &self,
        revs: &[Revision],
    ) -> Result<Vec<Revision>, GraphError> {
        if revs.is_empty() {
            return Ok(vec![]);
        }
        let revcount = revs.len();
        let mut candidates = vec![];
        // `revs` is non-empty, so `max()` cannot return `None`
        let max_rev = revs.iter().max().unwrap();

        // One bit set per revision up to `max_rev`; bit `i` of `seen[r]`
        // means "`r` is known to be an ancestor of `revs[i]`".
        let mut seen = BS::vec_of_empty(revs.len(), (max_rev.0 + 1) as usize);

        for (idx, rev) in revs.iter().enumerate() {
            seen[rev.0 as usize].add(idx);
        }
        let mut current_rev = *max_rev;
        // Number of revisions whose inspection in the main loop
        // will give a result or trigger inspection of other revisions
        let mut interesting = revcount;

        // The algorithm works on a vector of bit sets, indexed by revision
        // numbers and iterated on reverse order.
        // An entry in this vector is poisoned if and only if the corresponding
        // revision is a common, yet not maximal ancestor.

        // The principle of the algorithm is as follows:
        // For a revision `r`, when entering the loop, `seen[r]` is either
        // poisoned or the sub set of `revs` of which `r` is an ancestor.
        // If this sub set is full, then `r` is a solution and its parents
        // have to be poisoned.
        //
        // At each iteration, the bit sets of the parents are updated by
        // union with `seen[r]`.
        // As we walk the index from the end, we are sure we have encountered
        // all children of `r` before `r`, hence we know that `seen[r]` is
        // fully computed.
        //
        // On top of that there are several optimizations that make reading
        // less obvious than the comment above:
        // - The `interesting` counter allows to break early
        // - The loop starts from `max(revs)`
        // - Early return in case it is detected that one of the incoming revs
        //   is a common ancestor of all of them.
        while current_rev.0 >= 0 && interesting > 0 {
            let current_seen = seen[current_rev.0 as usize].clone();

            if current_seen.is_empty() {
                // Not an ancestor of any input rev: nothing to propagate.
                current_rev = Revision(current_rev.0 - 1);
                continue;
            }
            let mut poison = current_seen.is_poisoned();
            if !poison {
                interesting -= 1;
                if current_seen.is_full_range(revcount) {
                    candidates.push(current_rev);
                    poison = true;

                    // Being a common ancestor, if `current_rev` is among
                    // the input revisions, it is *the* answer.
                    for rev in revs {
                        if *rev == current_rev {
                            return Ok(candidates);
                        }
                    }
                }
            }
            for parent in self.parents(current_rev)? {
                if parent == NULL_REVISION {
                    continue;
                }
                let parent_seen = &mut seen[parent.0 as usize];
                if poison {
                    // this block is logically equivalent to poisoning parent
                    // and counting it as non interesting if it
                    // has been seen before (hence counted then as interesting)
                    if !parent_seen.is_empty() && !parent_seen.is_poisoned() {
                        interesting -= 1;
                    }
                    parent_seen.poison();
                } else {
                    if parent_seen.is_empty() {
                        interesting += 1;
                    }
                    parent_seen.union(&current_seen);
                }
            }

            current_rev = Revision(current_rev.0 - 1);
        }

        Ok(candidates)
    }
1168
1168
    /// Given a disjoint set of revs, return the subset with the longest path
    /// to the root.
    fn find_deepest_revs(
        &self,
        revs: &[Revision],
    ) -> Result<Vec<Revision>, GraphError> {
        // TODO replace this all with just comparing rank?
        // Also, the original implementations in C/Python are cryptic, not
        // even sure we actually need this?
        if revs.len() <= 1 {
            return Ok(revs.to_owned());
        }
        let max_rev = revs.iter().max().unwrap().0;
        // Maps a bit mask (one bit per input rev, see `mapping`) to a
        // count of revisions currently carrying that mask.
        let mut interesting = HashMap::new();
        // `seen[r]`: bit mask of the input revs that `r` has been reached
        // from while walking towards the root.
        let mut seen = vec![0; max_rev as usize + 1];
        // `depth[r]`: depth bookkeeping driving how masks propagate below;
        // 0 means `r` has not been reached yet.
        let mut depth = vec![0; max_rev as usize + 1];
        // `(mask bit, rev)` pairs used to translate the winning mask back
        // into revisions at the end.
        let mut mapping = vec![];
        let mut revs = revs.to_owned();
        revs.sort_unstable();

        for (idx, rev) in revs.iter().enumerate() {
            depth[rev.0 as usize] = 1;
            let shift = 1 << idx;
            seen[rev.0 as usize] = shift;
            interesting.insert(shift, 1);
            mapping.push((shift, *rev));
        }

        // Walk from the highest input rev towards the root, stopping once
        // a single interesting mask remains (or the root is passed).
        let mut current_rev = Revision(max_rev);
        while current_rev.0 >= 0 && interesting.len() > 1 {
            let current_depth = depth[current_rev.0 as usize];
            if current_depth == 0 {
                // `current_rev` was never reached: skip it.
                current_rev = Revision(current_rev.0 - 1);
                continue;
            }

            let current_seen = seen[current_rev.0 as usize];
            for parent in self.parents(current_rev)? {
                if parent == NULL_REVISION {
                    continue;
                }
                let parent_seen = seen[parent.0 as usize];
                let parent_depth = depth[parent.0 as usize];
                if parent_depth <= current_depth {
                    // Reaching the parent through a path at least as deep:
                    // the parent inherits the current mask outright.
                    depth[parent.0 as usize] = current_depth + 1;
                    if parent_seen != current_seen {
                        *interesting.get_mut(&current_seen).unwrap() += 1;
                        seen[parent.0 as usize] = current_seen;
                        if parent_seen != 0 {
                            let parent_interesting =
                                interesting.get_mut(&parent_seen).unwrap();
                            *parent_interesting -= 1;
                            if *parent_interesting == 0 {
                                interesting.remove(&parent_seen);
                            }
                        }
                    }
                } else if current_depth == parent_depth - 1 {
                    // Paths of equal depth meet at the parent: merge masks.
                    let either_seen = parent_seen | current_seen;
                    if either_seen == parent_seen {
                        continue;
                    }
                    seen[parent.0 as usize] = either_seen;
                    interesting
                        .entry(either_seen)
                        .and_modify(|v| *v += 1)
                        .or_insert(1);
                    *interesting.get_mut(&parent_seen).unwrap() -= 1;
                    if interesting[&parent_seen] == 0 {
                        interesting.remove(&parent_seen);
                    }
                }
            }
            // `current_rev` has been fully processed; it no longer counts
            // towards its mask.
            *interesting.get_mut(&current_seen).unwrap() -= 1;
            if interesting[&current_seen] == 0 {
                interesting.remove(&current_seen);
            }

            current_rev = Revision(current_rev.0 - 1);
        }

        if interesting.len() != 1 {
            return Ok(vec![]);
        }
        let mask = interesting.keys().next().unwrap();

        // Translate the winning mask back into the input revisions whose
        // bit is part of it.
        Ok(mapping
            .into_iter()
            .filter_map(|(shift, rev)| {
                if (mask & shift) != 0 {
                    return Some(rev);
                }
                None
            })
            .collect())
    }
1265 }
1265 }
1266
1266
/// The kind of functionality needed by find_gca_candidates
///
/// This is a bit mask which can be declared to be "poisoned", which callers
/// interpret to break out of some loops.
///
/// The maximum capacity of the bit mask is up to the actual implementation
trait PoisonableBitSet: Sized + PartialEq {
    /// Return a vector of exactly n elements, initialized to be empty.
    ///
    /// Optimization can vastly depend on implementation. Those being `Copy`
    /// and having constant capacity typically can have a very simple
    /// implementation.
    fn vec_of_empty(sets_size: usize, vec_len: usize) -> Vec<Self>;

    /// The size of the bit mask in memory
    fn size(&self) -> usize;

    /// The number of elements that can be represented in the set.
    ///
    /// Another way to put it is that it is the highest integer `C` such that
    /// the set is guaranteed to always be a subset of the integer range
    /// `[0, C)`
    fn capacity(&self) -> usize;

    /// Declare `n` to belong to the set
    fn add(&mut self, n: usize);

    /// Declare `n` not to belong to the set
    fn discard(&mut self, n: usize);

    /// Replace this bit set by its union with other
    fn union(&mut self, other: &Self);

    /// Poison the bit set
    ///
    /// Interpretation up to the caller
    fn poison(&mut self);

    /// Is the bit set poisoned?
    ///
    /// Interpretation is up to the caller
    fn is_poisoned(&self) -> bool;

    /// Is the bit set empty?
    fn is_empty(&self) -> bool;

    /// return `true` if and only if the bit set is the full range `[0, n)`
    /// of integers
    fn is_full_range(&self, n: usize) -> bool;
}
1317
1317
/// Poison marker for the `u64` bit set: its most significant bit, which is
/// therefore unavailable for regular elements (hence a capacity of 63).
const U64_POISON: u64 = 1 << 63;
/// Poison marker for the `u8` bit set: its most significant bit
/// (hence a capacity of 7).
const U8_POISON: u8 = 1 << 7;
1320
1320
1321 impl PoisonableBitSet for u64 {
1321 impl PoisonableBitSet for u64 {
1322 fn vec_of_empty(_sets_size: usize, vec_len: usize) -> Vec<Self> {
1322 fn vec_of_empty(_sets_size: usize, vec_len: usize) -> Vec<Self> {
1323 vec![0u64; vec_len]
1323 vec![0u64; vec_len]
1324 }
1324 }
1325
1325
1326 fn size(&self) -> usize {
1326 fn size(&self) -> usize {
1327 8
1327 8
1328 }
1328 }
1329
1329
1330 fn capacity(&self) -> usize {
1330 fn capacity(&self) -> usize {
1331 63
1331 63
1332 }
1332 }
1333
1333
1334 fn add(&mut self, n: usize) {
1334 fn add(&mut self, n: usize) {
1335 (*self) |= 1u64 << n;
1335 (*self) |= 1u64 << n;
1336 }
1336 }
1337
1337
1338 fn discard(&mut self, n: usize) {
1338 fn discard(&mut self, n: usize) {
1339 (*self) &= u64::MAX - (1u64 << n);
1339 (*self) &= u64::MAX - (1u64 << n);
1340 }
1340 }
1341
1341
1342 fn union(&mut self, other: &Self) {
1342 fn union(&mut self, other: &Self) {
1343 if *self != *other {
1343 if *self != *other {
1344 (*self) |= *other;
1344 (*self) |= *other;
1345 }
1345 }
1346 }
1346 }
1347
1347
1348 fn is_full_range(&self, n: usize) -> bool {
1348 fn is_full_range(&self, n: usize) -> bool {
1349 *self + 1 == (1u64 << n)
1349 *self + 1 == (1u64 << n)
1350 }
1350 }
1351
1351
1352 fn is_empty(&self) -> bool {
1352 fn is_empty(&self) -> bool {
1353 *self == 0
1353 *self == 0
1354 }
1354 }
1355
1355
1356 fn poison(&mut self) {
1356 fn poison(&mut self) {
1357 *self = U64_POISON;
1357 *self = U64_POISON;
1358 }
1358 }
1359
1359
1360 fn is_poisoned(&self) -> bool {
1360 fn is_poisoned(&self) -> bool {
1361 // equality comparison would be tempting but would not resist
1361 // equality comparison would be tempting but would not resist
1362 // operations after poisoning (even if these should be bogus).
1362 // operations after poisoning (even if these should be bogus).
1363 *self >= U64_POISON
1363 *self >= U64_POISON
1364 }
1364 }
1365 }
1365 }
1366
1366
1367 impl PoisonableBitSet for u8 {
1367 impl PoisonableBitSet for u8 {
1368 fn vec_of_empty(_sets_size: usize, vec_len: usize) -> Vec<Self> {
1368 fn vec_of_empty(_sets_size: usize, vec_len: usize) -> Vec<Self> {
1369 vec![0; vec_len]
1369 vec![0; vec_len]
1370 }
1370 }
1371
1371
1372 fn size(&self) -> usize {
1372 fn size(&self) -> usize {
1373 1
1373 1
1374 }
1374 }
1375
1375
1376 fn capacity(&self) -> usize {
1376 fn capacity(&self) -> usize {
1377 7
1377 7
1378 }
1378 }
1379
1379
1380 fn add(&mut self, n: usize) {
1380 fn add(&mut self, n: usize) {
1381 (*self) |= 1 << n;
1381 (*self) |= 1 << n;
1382 }
1382 }
1383
1383
1384 fn discard(&mut self, n: usize) {
1384 fn discard(&mut self, n: usize) {
1385 (*self) &= u8::MAX - (1 << n);
1385 (*self) &= u8::MAX - (1 << n);
1386 }
1386 }
1387
1387
1388 fn union(&mut self, other: &Self) {
1388 fn union(&mut self, other: &Self) {
1389 if *self != *other {
1389 if *self != *other {
1390 (*self) |= *other;
1390 (*self) |= *other;
1391 }
1391 }
1392 }
1392 }
1393
1393
1394 fn is_full_range(&self, n: usize) -> bool {
1394 fn is_full_range(&self, n: usize) -> bool {
1395 *self + 1 == (1 << n)
1395 *self + 1 == (1 << n)
1396 }
1396 }
1397
1397
1398 fn is_empty(&self) -> bool {
1398 fn is_empty(&self) -> bool {
1399 *self == 0
1399 *self == 0
1400 }
1400 }
1401
1401
1402 fn poison(&mut self) {
1402 fn poison(&mut self) {
1403 *self = U8_POISON;
1403 *self = U8_POISON;
1404 }
1404 }
1405
1405
1406 fn is_poisoned(&self) -> bool {
1406 fn is_poisoned(&self) -> bool {
1407 // equality comparison would be tempting but would not resist
1407 // equality comparison would be tempting but would not resist
1408 // operations after poisoning (even if these should be bogus).
1408 // operations after poisoning (even if these should be bogus).
1409 *self >= U8_POISON
1409 *self >= U8_POISON
1410 }
1410 }
1411 }
1411 }
1412
1412
/// A poisonable bit set whose capacity is not known at compile time but
/// is constant after initial construction
///
/// This can be way further optimized if performance assessments (speed
/// and/or RAM) require it.
/// As far as RAM is concerned, for large vectors of these, the main problem
/// would be the repetition of set_size in each item. We would need a trait
/// to abstract over the idea of a vector of such bit sets to do better.
#[derive(Clone, PartialEq)]
struct NonStaticPoisonableBitSet {
    // Number of representable elements, i.e. the `capacity()` of this set.
    set_size: usize,
    // Backing words; bit `set_size` (one position past the last element)
    // is used as the poison flag (see `poison`/`is_poisoned`).
    bit_set: Vec<u64>,
}
1426
1426
/// Number of `u64` needed for a [`NonStaticPoisonableBitSet`] of given size
///
/// Always at least one word. Note that this over-allocates by one word
/// when `set_size + 1` is an exact multiple of 64, which is harmless.
fn non_static_poisonable_inner_len(set_size: usize) -> usize {
    (set_size + 1) / 64 + 1
}
1431
1431
1432 impl NonStaticPoisonableBitSet {
1432 impl NonStaticPoisonableBitSet {
1433 /// The index of the sub-bit set for the given n, and the index inside
1433 /// The index of the sub-bit set for the given n, and the index inside
1434 /// the latter
1434 /// the latter
1435 fn index(&self, n: usize) -> (usize, usize) {
1435 fn index(&self, n: usize) -> (usize, usize) {
1436 (n / 64, n % 64)
1436 (n / 64, n % 64)
1437 }
1437 }
1438 }
1438 }
1439
1439
1440 /// Mock implementation to ensure that the trait makes sense
1440 /// Mock implementation to ensure that the trait makes sense
1441 impl PoisonableBitSet for NonStaticPoisonableBitSet {
1441 impl PoisonableBitSet for NonStaticPoisonableBitSet {
1442 fn vec_of_empty(set_size: usize, vec_len: usize) -> Vec<Self> {
1442 fn vec_of_empty(set_size: usize, vec_len: usize) -> Vec<Self> {
1443 let tmpl = Self {
1443 let tmpl = Self {
1444 set_size,
1444 set_size,
1445 bit_set: vec![0u64; non_static_poisonable_inner_len(set_size)],
1445 bit_set: vec![0u64; non_static_poisonable_inner_len(set_size)],
1446 };
1446 };
1447 vec![tmpl; vec_len]
1447 vec![tmpl; vec_len]
1448 }
1448 }
1449
1449
1450 fn size(&self) -> usize {
1450 fn size(&self) -> usize {
1451 8 + self.bit_set.len() * 8
1451 8 + self.bit_set.len() * 8
1452 }
1452 }
1453
1453
1454 fn capacity(&self) -> usize {
1454 fn capacity(&self) -> usize {
1455 self.set_size
1455 self.set_size
1456 }
1456 }
1457
1457
1458 fn add(&mut self, n: usize) {
1458 fn add(&mut self, n: usize) {
1459 let (sub_bs, bit_pos) = self.index(n);
1459 let (sub_bs, bit_pos) = self.index(n);
1460 self.bit_set[sub_bs] |= 1 << bit_pos
1460 self.bit_set[sub_bs] |= 1 << bit_pos
1461 }
1461 }
1462
1462
1463 fn discard(&mut self, n: usize) {
1463 fn discard(&mut self, n: usize) {
1464 let (sub_bs, bit_pos) = self.index(n);
1464 let (sub_bs, bit_pos) = self.index(n);
1465 self.bit_set[sub_bs] |= u64::MAX - (1 << bit_pos)
1465 self.bit_set[sub_bs] |= u64::MAX - (1 << bit_pos)
1466 }
1466 }
1467
1467
1468 fn union(&mut self, other: &Self) {
1468 fn union(&mut self, other: &Self) {
1469 assert!(
1469 assert!(
1470 self.set_size == other.set_size,
1470 self.set_size == other.set_size,
1471 "Binary operations on bit sets can only be done on same size"
1471 "Binary operations on bit sets can only be done on same size"
1472 );
1472 );
1473 for i in 0..self.bit_set.len() - 1 {
1473 for i in 0..self.bit_set.len() - 1 {
1474 self.bit_set[i] |= other.bit_set[i]
1474 self.bit_set[i] |= other.bit_set[i]
1475 }
1475 }
1476 }
1476 }
1477
1477
1478 fn is_full_range(&self, n: usize) -> bool {
1478 fn is_full_range(&self, n: usize) -> bool {
1479 let (sub_bs, bit_pos) = self.index(n);
1479 let (sub_bs, bit_pos) = self.index(n);
1480 self.bit_set[..sub_bs].iter().all(|bs| *bs == u64::MAX)
1480 self.bit_set[..sub_bs].iter().all(|bs| *bs == u64::MAX)
1481 && self.bit_set[sub_bs] == (1 << (bit_pos + 1)) - 1
1481 && self.bit_set[sub_bs] == (1 << (bit_pos + 1)) - 1
1482 }
1482 }
1483
1483
1484 fn is_empty(&self) -> bool {
1484 fn is_empty(&self) -> bool {
1485 self.bit_set.iter().all(|bs| *bs == 0u64)
1485 self.bit_set.iter().all(|bs| *bs == 0u64)
1486 }
1486 }
1487
1487
1488 fn poison(&mut self) {
1488 fn poison(&mut self) {
1489 let (sub_bs, bit_pos) = self.index(self.set_size);
1489 let (sub_bs, bit_pos) = self.index(self.set_size);
1490 self.bit_set[sub_bs] = 1 << bit_pos;
1490 self.bit_set[sub_bs] = 1 << bit_pos;
1491 }
1491 }
1492
1492
1493 fn is_poisoned(&self) -> bool {
1493 fn is_poisoned(&self) -> bool {
1494 let (sub_bs, bit_pos) = self.index(self.set_size);
1494 let (sub_bs, bit_pos) = self.index(self.set_size);
1495 self.bit_set[sub_bs] >= 1 << bit_pos
1495 self.bit_set[sub_bs] >= 1 << bit_pos
1496 }
1496 }
1497 }
1497 }
1498
1498
/// Set of roots of all non-public phases
///
/// One `HashSet` per non-public phase; presumably indexed in the order of
/// [`Phase::non_public_phases`] — confirm against the code filling it.
pub type RootsPerPhase = [HashSet<Revision>; Phase::non_public_phases().len()];
1501
1501
#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash)]
pub enum Phase {
    Public = 0,
    Draft = 1,
    Secret = 2,
    // NOTE(review): `TryFrom<usize>` below decodes this variant from the
    // value 32, not from the discriminant 3.
    Archived = 3,
    // NOTE(review): decoded from the value 96 in `TryFrom<usize>` below.
    Internal = 4,
}
1510
1510
1511 impl TryFrom<usize> for Phase {
1511 impl TryFrom<usize> for Phase {
1512 type Error = RevlogError;
1512 type Error = RevlogError;
1513
1513
1514 fn try_from(value: usize) -> Result<Self, Self::Error> {
1514 fn try_from(value: usize) -> Result<Self, Self::Error> {
1515 Ok(match value {
1515 Ok(match value {
1516 0 => Self::Public,
1516 0 => Self::Public,
1517 1 => Self::Draft,
1517 1 => Self::Draft,
1518 2 => Self::Secret,
1518 2 => Self::Secret,
1519 32 => Self::Archived,
1519 32 => Self::Archived,
1520 96 => Self::Internal,
1520 96 => Self::Internal,
1521 v => {
1521 v => {
1522 return Err(RevlogError::corrupted(format!(
1522 return Err(RevlogError::corrupted(format!(
1523 "invalid phase value {}",
1523 "invalid phase value {}",
1524 v
1524 v
1525 )))
1525 )))
1526 }
1526 }
1527 })
1527 })
1528 }
1528 }
1529 }
1529 }
1530
1530
impl Phase {
    /// Every phase, in declaration (and thus derived `Ord`) order.
    pub const fn all_phases() -> &'static [Self] {
        &[
            Self::Public,
            Self::Draft,
            Self::Secret,
            Self::Archived,
            Self::Internal,
        ]
    }
    /// Every phase except `Public`, in the same relative order as
    /// [`Self::all_phases`].
    pub const fn non_public_phases() -> &'static [Self] {
        &[Self::Draft, Self::Secret, Self::Archived, Self::Internal]
    }
}
1545
1545
1546 fn inline_scan(bytes: &[u8]) -> (usize, Vec<usize>) {
1546 fn inline_scan(bytes: &[u8]) -> (usize, Vec<usize>) {
1547 let mut offset: usize = 0;
1547 let mut offset: usize = 0;
1548 let mut offsets = Vec::new();
1548 let mut offsets = Vec::new();
1549
1549
1550 while offset + INDEX_ENTRY_SIZE <= bytes.len() {
1550 while offset + INDEX_ENTRY_SIZE <= bytes.len() {
1551 offsets.push(offset);
1551 offsets.push(offset);
1552 let end = offset + INDEX_ENTRY_SIZE;
1552 let end = offset + INDEX_ENTRY_SIZE;
1553 let entry = IndexEntry {
1553 let entry = IndexEntry {
1554 bytes: &bytes[offset..end],
1554 bytes: &bytes[offset..end],
1555 offset_override: None,
1555 offset_override: None,
1556 };
1556 };
1557
1557
1558 offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
1558 offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
1559 }
1559 }
1560 (offset, offsets)
1560 (offset, offsets)
1561 }
1561 }
1562
1562
1563 impl super::RevlogIndex for Index {
1563 impl super::RevlogIndex for Index {
1564 fn len(&self) -> usize {
1564 fn len(&self) -> usize {
1565 self.len()
1565 self.len()
1566 }
1566 }
1567
1567
1568 fn node(&self, rev: Revision) -> Option<&Node> {
1568 fn node(&self, rev: Revision) -> Option<&Node> {
1569 if rev == NULL_REVISION {
1569 if rev == NULL_REVISION {
1570 return Some(&NULL_NODE);
1570 return Some(&NULL_NODE);
1571 }
1571 }
1572 self.get_entry(rev).map(|entry| entry.hash())
1572 self.get_entry(rev).map(|entry| entry.hash())
1573 }
1573 }
1574 }
1574 }
1575
1575
#[derive(Debug)]
pub struct IndexEntry<'a> {
    /// The raw bytes of this entry (normally `INDEX_ENTRY_SIZE` of them),
    /// borrowed from the index data.
    bytes: &'a [u8],
    /// Allows to override the offset value of the entry.
    ///
    /// For interleaved index and data, the offset stored in the index
    /// corresponds to the separated data offset.
    /// It has to be overridden with the actual offset in the interleaved
    /// index which is just after the index block.
    ///
    /// For separated index and data, the offset stored in the first index
    /// entry is mixed with the index headers.
    /// It has to be overridden with 0.
    offset_override: Option<usize>,
}
1591
1591
impl<'a> IndexEntry<'a> {
    /// Return the offset of the data.
    pub fn offset(&self) -> usize {
        if let Some(offset_override) = self.offset_override {
            offset_override
        } else {
            // The first 8-byte field packs a 48-bit big-endian offset in
            // bytes 0..6 (the flags live in bytes 6..8, see `flags`);
            // rebuild just the offset as a right-aligned u64.
            let mut bytes = [0; 8];
            bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
            BigEndian::read_u64(&bytes[..]) as usize
        }
    }
    /// The raw first 8 bytes: the 48-bit offset combined with the
    /// 16-bit flags.
    pub fn raw_offset(&self) -> u64 {
        BigEndian::read_u64(&self.bytes[0..8])
    }

    /// Same result (except potentially for rev 0) as C `index_get_start()`
    fn c_start(&self) -> u64 {
        // Dropping the low 16 bits (the flags) leaves the 48-bit offset.
        self.raw_offset() >> 16
    }

    /// Revision flags, stored in bytes 6..8 of the entry.
    pub fn flags(&self) -> u16 {
        BigEndian::read_u16(&self.bytes[6..=7])
    }

    /// Return the compressed length of the data.
    pub fn compressed_len(&self) -> u32 {
        BigEndian::read_u32(&self.bytes[8..=11])
    }

    /// Return the uncompressed length of the data.
    pub fn uncompressed_len(&self) -> i32 {
        BigEndian::read_i32(&self.bytes[12..=15])
    }

    /// Return the revision upon which the data has been derived.
    pub fn base_revision_or_base_of_delta_chain(&self) -> UncheckedRevision {
        // TODO Maybe return an Option when base_revision == rev?
        // Requires to add rev to IndexEntry

        BigEndian::read_i32(&self.bytes[16..]).into()
    }

    /// The revision this entry is linked to (bytes 20..24).
    pub fn link_revision(&self) -> UncheckedRevision {
        BigEndian::read_i32(&self.bytes[20..]).into()
    }

    /// First parent revision (bytes 24..28).
    pub fn p1(&self) -> UncheckedRevision {
        BigEndian::read_i32(&self.bytes[24..]).into()
    }

    /// Second parent revision (bytes 28..32).
    pub fn p2(&self) -> UncheckedRevision {
        BigEndian::read_i32(&self.bytes[28..]).into()
    }

    /// Return the hash of revision's full text.
    ///
    /// Currently, SHA-1 is used and only the first 20 bytes of this field
    /// are used.
    pub fn hash(&self) -> &'a Node {
        (&self.bytes[32..52]).try_into().unwrap()
    }

    /// The raw bytes of the whole entry.
    pub fn as_bytes(&self) -> &'a [u8] {
        self.bytes
    }
}
1658
1658
1659 #[cfg(test)]
1659 #[cfg(test)]
1660 mod tests {
1660 mod tests {
1661 use super::*;
1661 use super::*;
1662 use crate::node::NULL_NODE;
1662 use crate::node::NULL_NODE;
1663
1663
1664 #[cfg(test)]
1664 #[cfg(test)]
1665 #[derive(Debug, Copy, Clone)]
1665 #[derive(Debug, Copy, Clone)]
1666 pub struct IndexEntryBuilder {
1666 pub struct IndexEntryBuilder {
1667 is_first: bool,
1667 is_first: bool,
1668 is_inline: bool,
1668 is_inline: bool,
1669 is_general_delta: bool,
1669 is_general_delta: bool,
1670 version: u16,
1670 version: u16,
1671 offset: usize,
1671 offset: usize,
1672 compressed_len: usize,
1672 compressed_len: usize,
1673 uncompressed_len: usize,
1673 uncompressed_len: usize,
1674 base_revision_or_base_of_delta_chain: Revision,
1674 base_revision_or_base_of_delta_chain: Revision,
1675 link_revision: Revision,
1675 link_revision: Revision,
1676 p1: Revision,
1676 p1: Revision,
1677 p2: Revision,
1677 p2: Revision,
1678 node: Node,
1678 node: Node,
1679 }
1679 }
1680
1680
1681 #[cfg(test)]
1681 #[cfg(test)]
1682 impl IndexEntryBuilder {
1682 impl IndexEntryBuilder {
1683 #[allow(clippy::new_without_default)]
1683 #[allow(clippy::new_without_default)]
1684 pub fn new() -> Self {
1684 pub fn new() -> Self {
1685 Self {
1685 Self {
1686 is_first: false,
1686 is_first: false,
1687 is_inline: false,
1687 is_inline: false,
1688 is_general_delta: true,
1688 is_general_delta: true,
1689 version: 1,
1689 version: 1,
1690 offset: 0,
1690 offset: 0,
1691 compressed_len: 0,
1691 compressed_len: 0,
1692 uncompressed_len: 0,
1692 uncompressed_len: 0,
1693 base_revision_or_base_of_delta_chain: Revision(0),
1693 base_revision_or_base_of_delta_chain: Revision(0),
1694 link_revision: Revision(0),
1694 link_revision: Revision(0),
1695 p1: NULL_REVISION,
1695 p1: NULL_REVISION,
1696 p2: NULL_REVISION,
1696 p2: NULL_REVISION,
1697 node: NULL_NODE,
1697 node: NULL_NODE,
1698 }
1698 }
1699 }
1699 }
1700
1700
1701 pub fn is_first(&mut self, value: bool) -> &mut Self {
1701 pub fn is_first(&mut self, value: bool) -> &mut Self {
1702 self.is_first = value;
1702 self.is_first = value;
1703 self
1703 self
1704 }
1704 }
1705
1705
1706 pub fn with_inline(&mut self, value: bool) -> &mut Self {
1706 pub fn with_inline(&mut self, value: bool) -> &mut Self {
1707 self.is_inline = value;
1707 self.is_inline = value;
1708 self
1708 self
1709 }
1709 }
1710
1710
1711 pub fn with_general_delta(&mut self, value: bool) -> &mut Self {
1711 pub fn with_general_delta(&mut self, value: bool) -> &mut Self {
1712 self.is_general_delta = value;
1712 self.is_general_delta = value;
1713 self
1713 self
1714 }
1714 }
1715
1715
1716 pub fn with_version(&mut self, value: u16) -> &mut Self {
1716 pub fn with_version(&mut self, value: u16) -> &mut Self {
1717 self.version = value;
1717 self.version = value;
1718 self
1718 self
1719 }
1719 }
1720
1720
1721 pub fn with_offset(&mut self, value: usize) -> &mut Self {
1721 pub fn with_offset(&mut self, value: usize) -> &mut Self {
1722 self.offset = value;
1722 self.offset = value;
1723 self
1723 self
1724 }
1724 }
1725
1725
1726 pub fn with_compressed_len(&mut self, value: usize) -> &mut Self {
1726 pub fn with_compressed_len(&mut self, value: usize) -> &mut Self {
1727 self.compressed_len = value;
1727 self.compressed_len = value;
1728 self
1728 self
1729 }
1729 }
1730
1730
1731 pub fn with_uncompressed_len(&mut self, value: usize) -> &mut Self {
1731 pub fn with_uncompressed_len(&mut self, value: usize) -> &mut Self {
1732 self.uncompressed_len = value;
1732 self.uncompressed_len = value;
1733 self
1733 self
1734 }
1734 }
1735
1735
1736 pub fn with_base_revision_or_base_of_delta_chain(
1736 pub fn with_base_revision_or_base_of_delta_chain(
1737 &mut self,
1737 &mut self,
1738 value: Revision,
1738 value: Revision,
1739 ) -> &mut Self {
1739 ) -> &mut Self {
1740 self.base_revision_or_base_of_delta_chain = value;
1740 self.base_revision_or_base_of_delta_chain = value;
1741 self
1741 self
1742 }
1742 }
1743
1743
1744 pub fn with_link_revision(&mut self, value: Revision) -> &mut Self {
1744 pub fn with_link_revision(&mut self, value: Revision) -> &mut Self {
1745 self.link_revision = value;
1745 self.link_revision = value;
1746 self
1746 self
1747 }
1747 }
1748
1748
1749 pub fn with_p1(&mut self, value: Revision) -> &mut Self {
1749 pub fn with_p1(&mut self, value: Revision) -> &mut Self {
1750 self.p1 = value;
1750 self.p1 = value;
1751 self
1751 self
1752 }
1752 }
1753
1753
1754 pub fn with_p2(&mut self, value: Revision) -> &mut Self {
1754 pub fn with_p2(&mut self, value: Revision) -> &mut Self {
1755 self.p2 = value;
1755 self.p2 = value;
1756 self
1756 self
1757 }
1757 }
1758
1758
1759 pub fn with_node(&mut self, value: Node) -> &mut Self {
1759 pub fn with_node(&mut self, value: Node) -> &mut Self {
1760 self.node = value;
1760 self.node = value;
1761 self
1761 self
1762 }
1762 }
1763
1763
1764 pub fn build(&self) -> Vec<u8> {
1764 pub fn build(&self) -> Vec<u8> {
1765 let mut bytes = Vec::with_capacity(INDEX_ENTRY_SIZE);
1765 let mut bytes = Vec::with_capacity(INDEX_ENTRY_SIZE);
1766 if self.is_first {
1766 if self.is_first {
1767 bytes.extend(&match (self.is_general_delta, self.is_inline) {
1767 bytes.extend(&match (self.is_general_delta, self.is_inline) {
1768 (false, false) => [0u8, 0],
1768 (false, false) => [0u8, 0],
1769 (false, true) => [0u8, 1],
1769 (false, true) => [0u8, 1],
1770 (true, false) => [0u8, 2],
1770 (true, false) => [0u8, 2],
1771 (true, true) => [0u8, 3],
1771 (true, true) => [0u8, 3],
1772 });
1772 });
1773 bytes.extend(&self.version.to_be_bytes());
1773 bytes.extend(&self.version.to_be_bytes());
1774 // Remaining offset bytes.
1774 // Remaining offset bytes.
1775 bytes.extend(&[0u8; 2]);
1775 bytes.extend(&[0u8; 2]);
1776 } else {
1776 } else {
1777 // Offset stored on 48 bits (6 bytes)
1777 // Offset stored on 48 bits (6 bytes)
1778 bytes.extend(&(self.offset as u64).to_be_bytes()[2..]);
1778 bytes.extend(&(self.offset as u64).to_be_bytes()[2..]);
1779 }
1779 }
1780 bytes.extend(&[0u8; 2]); // Revision flags.
1780 bytes.extend(&[0u8; 2]); // Revision flags.
1781 bytes.extend(&(self.compressed_len as u32).to_be_bytes());
1781 bytes.extend(&(self.compressed_len as u32).to_be_bytes());
1782 bytes.extend(&(self.uncompressed_len as u32).to_be_bytes());
1782 bytes.extend(&(self.uncompressed_len as u32).to_be_bytes());
1783 bytes.extend(
1783 bytes.extend(
1784 &self.base_revision_or_base_of_delta_chain.0.to_be_bytes(),
1784 &self.base_revision_or_base_of_delta_chain.0.to_be_bytes(),
1785 );
1785 );
1786 bytes.extend(&self.link_revision.0.to_be_bytes());
1786 bytes.extend(&self.link_revision.0.to_be_bytes());
1787 bytes.extend(&self.p1.0.to_be_bytes());
1787 bytes.extend(&self.p1.0.to_be_bytes());
1788 bytes.extend(&self.p2.0.to_be_bytes());
1788 bytes.extend(&self.p2.0.to_be_bytes());
1789 bytes.extend(self.node.as_bytes());
1789 bytes.extend(self.node.as_bytes());
1790 bytes.extend(vec![0u8; 12]);
1790 bytes.extend(vec![0u8; 12]);
1791 bytes
1791 bytes
1792 }
1792 }
1793 }
1793 }
1794
1794
1795 pub fn is_inline(index_bytes: &[u8]) -> bool {
1795 pub fn is_inline(index_bytes: &[u8]) -> bool {
1796 IndexHeader::parse(index_bytes)
1796 IndexHeader::parse(index_bytes)
1797 .expect("too short")
1797 .expect("too short")
1798 .unwrap()
1798 .unwrap()
1799 .format_flags()
1799 .format_flags()
1800 .is_inline()
1800 .is_inline()
1801 }
1801 }
1802
1802
1803 pub fn uses_generaldelta(index_bytes: &[u8]) -> bool {
1803 pub fn uses_generaldelta(index_bytes: &[u8]) -> bool {
1804 IndexHeader::parse(index_bytes)
1804 IndexHeader::parse(index_bytes)
1805 .expect("too short")
1805 .expect("too short")
1806 .unwrap()
1806 .unwrap()
1807 .format_flags()
1807 .format_flags()
1808 .uses_generaldelta()
1808 .uses_generaldelta()
1809 }
1809 }
1810
1810
1811 pub fn get_version(index_bytes: &[u8]) -> u16 {
1811 pub fn get_version(index_bytes: &[u8]) -> u16 {
1812 IndexHeader::parse(index_bytes)
1812 IndexHeader::parse(index_bytes)
1813 .expect("too short")
1813 .expect("too short")
1814 .unwrap()
1814 .unwrap()
1815 .format_version()
1815 .format_version()
1816 }
1816 }
1817
1817
1818 #[test]
1818 #[test]
1819 fn flags_when_no_inline_flag_test() {
1819 fn flags_when_no_inline_flag_test() {
1820 let bytes = IndexEntryBuilder::new()
1820 let bytes = IndexEntryBuilder::new()
1821 .is_first(true)
1821 .is_first(true)
1822 .with_general_delta(false)
1822 .with_general_delta(false)
1823 .with_inline(false)
1823 .with_inline(false)
1824 .build();
1824 .build();
1825
1825
1826 assert!(!is_inline(&bytes));
1826 assert!(!is_inline(&bytes));
1827 assert!(!uses_generaldelta(&bytes));
1827 assert!(!uses_generaldelta(&bytes));
1828 }
1828 }
1829
1829
1830 #[test]
1830 #[test]
1831 fn flags_when_inline_flag_test() {
1831 fn flags_when_inline_flag_test() {
1832 let bytes = IndexEntryBuilder::new()
1832 let bytes = IndexEntryBuilder::new()
1833 .is_first(true)
1833 .is_first(true)
1834 .with_general_delta(false)
1834 .with_general_delta(false)
1835 .with_inline(true)
1835 .with_inline(true)
1836 .build();
1836 .build();
1837
1837
1838 assert!(is_inline(&bytes));
1838 assert!(is_inline(&bytes));
1839 assert!(!uses_generaldelta(&bytes));
1839 assert!(!uses_generaldelta(&bytes));
1840 }
1840 }
1841
1841
1842 #[test]
1842 #[test]
1843 fn flags_when_inline_and_generaldelta_flags_test() {
1843 fn flags_when_inline_and_generaldelta_flags_test() {
1844 let bytes = IndexEntryBuilder::new()
1844 let bytes = IndexEntryBuilder::new()
1845 .is_first(true)
1845 .is_first(true)
1846 .with_general_delta(true)
1846 .with_general_delta(true)
1847 .with_inline(true)
1847 .with_inline(true)
1848 .build();
1848 .build();
1849
1849
1850 assert!(is_inline(&bytes));
1850 assert!(is_inline(&bytes));
1851 assert!(uses_generaldelta(&bytes));
1851 assert!(uses_generaldelta(&bytes));
1852 }
1852 }
1853
1853
1854 #[test]
1854 #[test]
1855 fn test_offset() {
1855 fn test_offset() {
1856 let bytes = IndexEntryBuilder::new().with_offset(1).build();
1856 let bytes = IndexEntryBuilder::new().with_offset(1).build();
1857 let entry = IndexEntry {
1857 let entry = IndexEntry {
1858 bytes: &bytes,
1858 bytes: &bytes,
1859 offset_override: None,
1859 offset_override: None,
1860 };
1860 };
1861
1861
1862 assert_eq!(entry.offset(), 1)
1862 assert_eq!(entry.offset(), 1)
1863 }
1863 }
1864
1864
1865 #[test]
1865 #[test]
1866 fn test_with_overridden_offset() {
1866 fn test_with_overridden_offset() {
1867 let bytes = IndexEntryBuilder::new().with_offset(1).build();
1867 let bytes = IndexEntryBuilder::new().with_offset(1).build();
1868 let entry = IndexEntry {
1868 let entry = IndexEntry {
1869 bytes: &bytes,
1869 bytes: &bytes,
1870 offset_override: Some(2),
1870 offset_override: Some(2),
1871 };
1871 };
1872
1872
1873 assert_eq!(entry.offset(), 2)
1873 assert_eq!(entry.offset(), 2)
1874 }
1874 }
1875
1875
1876 #[test]
1876 #[test]
1877 fn test_compressed_len() {
1877 fn test_compressed_len() {
1878 let bytes = IndexEntryBuilder::new().with_compressed_len(1).build();
1878 let bytes = IndexEntryBuilder::new().with_compressed_len(1).build();
1879 let entry = IndexEntry {
1879 let entry = IndexEntry {
1880 bytes: &bytes,
1880 bytes: &bytes,
1881 offset_override: None,
1881 offset_override: None,
1882 };
1882 };
1883
1883
1884 assert_eq!(entry.compressed_len(), 1)
1884 assert_eq!(entry.compressed_len(), 1)
1885 }
1885 }
1886
1886
1887 #[test]
1887 #[test]
1888 fn test_uncompressed_len() {
1888 fn test_uncompressed_len() {
1889 let bytes = IndexEntryBuilder::new().with_uncompressed_len(1).build();
1889 let bytes = IndexEntryBuilder::new().with_uncompressed_len(1).build();
1890 let entry = IndexEntry {
1890 let entry = IndexEntry {
1891 bytes: &bytes,
1891 bytes: &bytes,
1892 offset_override: None,
1892 offset_override: None,
1893 };
1893 };
1894
1894
1895 assert_eq!(entry.uncompressed_len(), 1)
1895 assert_eq!(entry.uncompressed_len(), 1)
1896 }
1896 }
1897
1897
1898 #[test]
1898 #[test]
1899 fn test_base_revision_or_base_of_delta_chain() {
1899 fn test_base_revision_or_base_of_delta_chain() {
1900 let bytes = IndexEntryBuilder::new()
1900 let bytes = IndexEntryBuilder::new()
1901 .with_base_revision_or_base_of_delta_chain(Revision(1))
1901 .with_base_revision_or_base_of_delta_chain(Revision(1))
1902 .build();
1902 .build();
1903 let entry = IndexEntry {
1903 let entry = IndexEntry {
1904 bytes: &bytes,
1904 bytes: &bytes,
1905 offset_override: None,
1905 offset_override: None,
1906 };
1906 };
1907
1907
1908 assert_eq!(entry.base_revision_or_base_of_delta_chain(), 1.into())
1908 assert_eq!(entry.base_revision_or_base_of_delta_chain(), 1.into())
1909 }
1909 }
1910
1910
1911 #[test]
1911 #[test]
1912 fn link_revision_test() {
1912 fn link_revision_test() {
1913 let bytes = IndexEntryBuilder::new()
1913 let bytes = IndexEntryBuilder::new()
1914 .with_link_revision(Revision(123))
1914 .with_link_revision(Revision(123))
1915 .build();
1915 .build();
1916
1916
1917 let entry = IndexEntry {
1917 let entry = IndexEntry {
1918 bytes: &bytes,
1918 bytes: &bytes,
1919 offset_override: None,
1919 offset_override: None,
1920 };
1920 };
1921
1921
1922 assert_eq!(entry.link_revision(), 123.into());
1922 assert_eq!(entry.link_revision(), 123.into());
1923 }
1923 }
1924
1924
1925 #[test]
1925 #[test]
1926 fn p1_test() {
1926 fn p1_test() {
1927 let bytes = IndexEntryBuilder::new().with_p1(Revision(123)).build();
1927 let bytes = IndexEntryBuilder::new().with_p1(Revision(123)).build();
1928
1928
1929 let entry = IndexEntry {
1929 let entry = IndexEntry {
1930 bytes: &bytes,
1930 bytes: &bytes,
1931 offset_override: None,
1931 offset_override: None,
1932 };
1932 };
1933
1933
1934 assert_eq!(entry.p1(), 123.into());
1934 assert_eq!(entry.p1(), 123.into());
1935 }
1935 }
1936
1936
1937 #[test]
1937 #[test]
1938 fn p2_test() {
1938 fn p2_test() {
1939 let bytes = IndexEntryBuilder::new().with_p2(Revision(123)).build();
1939 let bytes = IndexEntryBuilder::new().with_p2(Revision(123)).build();
1940
1940
1941 let entry = IndexEntry {
1941 let entry = IndexEntry {
1942 bytes: &bytes,
1942 bytes: &bytes,
1943 offset_override: None,
1943 offset_override: None,
1944 };
1944 };
1945
1945
1946 assert_eq!(entry.p2(), 123.into());
1946 assert_eq!(entry.p2(), 123.into());
1947 }
1947 }
1948
1948
1949 #[test]
1949 #[test]
1950 fn node_test() {
1950 fn node_test() {
1951 let node = Node::from_hex("0123456789012345678901234567890123456789")
1951 let node = Node::from_hex("0123456789012345678901234567890123456789")
1952 .unwrap();
1952 .unwrap();
1953 let bytes = IndexEntryBuilder::new().with_node(node).build();
1953 let bytes = IndexEntryBuilder::new().with_node(node).build();
1954
1954
1955 let entry = IndexEntry {
1955 let entry = IndexEntry {
1956 bytes: &bytes,
1956 bytes: &bytes,
1957 offset_override: None,
1957 offset_override: None,
1958 };
1958 };
1959
1959
1960 assert_eq!(*entry.hash(), node);
1960 assert_eq!(*entry.hash(), node);
1961 }
1961 }
1962
1962
1963 #[test]
1963 #[test]
1964 fn version_test() {
1964 fn version_test() {
1965 let bytes = IndexEntryBuilder::new()
1965 let bytes = IndexEntryBuilder::new()
1966 .is_first(true)
1966 .is_first(true)
1967 .with_version(2)
1967 .with_version(2)
1968 .build();
1968 .build();
1969
1969
1970 assert_eq!(get_version(&bytes), 2)
1970 assert_eq!(get_version(&bytes), 2)
1971 }
1971 }
1972 }
1972 }
1973
1973
1974 #[cfg(test)]
1974 #[cfg(test)]
1975 pub use tests::IndexEntryBuilder;
1975 pub use tests::IndexEntryBuilder;
@@ -1,1158 +1,1158 b''
1 // revlog.rs
1 // revlog.rs
2 //
2 //
3 // Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
3 // Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::{
8 use crate::{
9 cindex,
9 cindex,
10 conversion::{rev_pyiter_collect, rev_pyiter_collect_or_else},
10 conversion::{rev_pyiter_collect, rev_pyiter_collect_or_else},
11 utils::{node_from_py_bytes, node_from_py_object},
11 utils::{node_from_py_bytes, node_from_py_object},
12 PyRevision,
12 PyRevision,
13 };
13 };
14 use cpython::{
14 use cpython::{
15 buffer::{Element, PyBuffer},
15 buffer::{Element, PyBuffer},
16 exc::{IndexError, ValueError},
16 exc::{IndexError, ValueError},
17 ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyInt, PyList,
17 ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyInt, PyList,
18 PyModule, PyObject, PyResult, PySet, PyString, PyTuple, Python,
18 PyModule, PyObject, PyResult, PySet, PyString, PyTuple, Python,
19 PythonObject, ToPyObject,
19 PythonObject, ToPyObject,
20 };
20 };
21 use hg::{
21 use hg::{
22 errors::HgError,
22 errors::HgError,
23 index::{
23 index::{
24 IndexHeader, Phase, RevisionDataParams, SnapshotsCache,
24 IndexHeader, Phase, RevisionDataParams, SnapshotsCache,
25 INDEX_ENTRY_SIZE,
25 INDEX_ENTRY_SIZE,
26 },
26 },
27 nodemap::{Block, NodeMapError, NodeTree},
27 nodemap::{Block, NodeMapError, NodeTree},
28 revlog::{nodemap::NodeMap, NodePrefix, RevlogError, RevlogIndex},
28 revlog::{nodemap::NodeMap, NodePrefix, RevlogError, RevlogIndex},
29 BaseRevision, Revision, UncheckedRevision, NULL_REVISION,
29 BaseRevision, Revision, UncheckedRevision, NULL_REVISION,
30 };
30 };
31 use std::{cell::RefCell, collections::HashMap};
31 use std::{cell::RefCell, collections::HashMap};
32
32
33 /// Return a Struct implementing the Graph trait
33 /// Return a Struct implementing the Graph trait
34 pub(crate) fn pyindex_to_graph(
34 pub(crate) fn pyindex_to_graph(
35 py: Python,
35 py: Python,
36 index: PyObject,
36 index: PyObject,
37 ) -> PyResult<cindex::Index> {
37 ) -> PyResult<cindex::Index> {
38 match index.extract::<MixedIndex>(py) {
38 match index.extract::<MixedIndex>(py) {
39 Ok(midx) => Ok(midx.clone_cindex(py)),
39 Ok(midx) => Ok(midx.clone_cindex(py)),
40 Err(_) => cindex::Index::new(py, index),
40 Err(_) => cindex::Index::new(py, index),
41 }
41 }
42 }
42 }
43
43
44 py_class!(pub class MixedIndex |py| {
44 py_class!(pub class MixedIndex |py| {
45 data cindex: RefCell<cindex::Index>;
45 data cindex: RefCell<cindex::Index>;
46 data index: RefCell<hg::index::Index>;
46 data index: RefCell<hg::index::Index>;
47 data nt: RefCell<Option<NodeTree>>;
47 data nt: RefCell<Option<NodeTree>>;
48 data docket: RefCell<Option<PyObject>>;
48 data docket: RefCell<Option<PyObject>>;
49 // Holds a reference to the mmap'ed persistent nodemap data
49 // Holds a reference to the mmap'ed persistent nodemap data
50 data nodemap_mmap: RefCell<Option<PyBuffer>>;
50 data nodemap_mmap: RefCell<Option<PyBuffer>>;
51 // Holds a reference to the mmap'ed persistent index data
51 // Holds a reference to the mmap'ed persistent index data
52 data index_mmap: RefCell<Option<PyBuffer>>;
52 data index_mmap: RefCell<Option<PyBuffer>>;
53
53
54 def __new__(
54 def __new__(
55 _cls,
55 _cls,
56 cindex: PyObject,
56 cindex: PyObject,
57 data: PyObject,
57 data: PyObject,
58 default_header: u32,
58 default_header: u32,
59 ) -> PyResult<MixedIndex> {
59 ) -> PyResult<MixedIndex> {
60 Self::new(py, cindex, data, default_header)
60 Self::new(py, cindex, data, default_header)
61 }
61 }
62
62
63 /// Compatibility layer used for Python consumers needing access to the C index
63 /// Compatibility layer used for Python consumers needing access to the C index
64 ///
64 ///
65 /// Only use case so far is `scmutil.shortesthexnodeidprefix`,
65 /// Only use case so far is `scmutil.shortesthexnodeidprefix`,
66 /// that may need to build a custom `nodetree`, based on a specified revset.
66 /// that may need to build a custom `nodetree`, based on a specified revset.
67 /// With a Rust implementation of the nodemap, we will be able to get rid of
67 /// With a Rust implementation of the nodemap, we will be able to get rid of
68 /// this, by exposing our own standalone nodemap class,
68 /// this, by exposing our own standalone nodemap class,
69 /// ready to accept `MixedIndex`.
69 /// ready to accept `MixedIndex`.
70 def get_cindex(&self) -> PyResult<PyObject> {
70 def get_cindex(&self) -> PyResult<PyObject> {
71 Ok(self.cindex(py).borrow().inner().clone_ref(py))
71 Ok(self.cindex(py).borrow().inner().clone_ref(py))
72 }
72 }
73
73
74 // Index API involving nodemap, as defined in mercurial/pure/parsers.py
74 // Index API involving nodemap, as defined in mercurial/pure/parsers.py
75
75
76 /// Return Revision if found, raises a bare `error.RevlogError`
76 /// Return Revision if found, raises a bare `error.RevlogError`
77 /// in case of ambiguity, same as C version does
77 /// in case of ambiguity, same as C version does
78 def get_rev(&self, node: PyBytes) -> PyResult<Option<PyRevision>> {
78 def get_rev(&self, node: PyBytes) -> PyResult<Option<PyRevision>> {
79 let opt = self.get_nodetree(py)?.borrow();
79 let opt = self.get_nodetree(py)?.borrow();
80 let nt = opt.as_ref().unwrap();
80 let nt = opt.as_ref().unwrap();
81 let idx = &*self.cindex(py).borrow();
81 let idx = &*self.cindex(py).borrow();
82 let ridx = &*self.index(py).borrow();
82 let ridx = &*self.index(py).borrow();
83 let node = node_from_py_bytes(py, &node)?;
83 let node = node_from_py_bytes(py, &node)?;
84 let rust_rev =
84 let rust_rev =
85 nt.find_bin(ridx, node.into()).map_err(|e| nodemap_error(py, e))?;
85 nt.find_bin(ridx, node.into()).map_err(|e| nodemap_error(py, e))?;
86 let c_rev =
86 let c_rev =
87 nt.find_bin(idx, node.into()).map_err(|e| nodemap_error(py, e))?;
87 nt.find_bin(idx, node.into()).map_err(|e| nodemap_error(py, e))?;
88 assert_eq!(rust_rev, c_rev);
88 assert_eq!(rust_rev, c_rev);
89 Ok(rust_rev.map(Into::into))
89 Ok(rust_rev.map(Into::into))
90
90
91 }
91 }
92
92
93 /// same as `get_rev()` but raises a bare `error.RevlogError` if node
93 /// same as `get_rev()` but raises a bare `error.RevlogError` if node
94 /// is not found.
94 /// is not found.
95 ///
95 ///
96 /// No need to repeat `node` in the exception, `mercurial/revlog.py`
96 /// No need to repeat `node` in the exception, `mercurial/revlog.py`
97 /// will catch and rewrap with it
97 /// will catch and rewrap with it
98 def rev(&self, node: PyBytes) -> PyResult<PyRevision> {
98 def rev(&self, node: PyBytes) -> PyResult<PyRevision> {
99 self.get_rev(py, node)?.ok_or_else(|| revlog_error(py))
99 self.get_rev(py, node)?.ok_or_else(|| revlog_error(py))
100 }
100 }
101
101
102 /// return True if the node exist in the index
102 /// return True if the node exist in the index
103 def has_node(&self, node: PyBytes) -> PyResult<bool> {
103 def has_node(&self, node: PyBytes) -> PyResult<bool> {
104 // TODO OPTIM we could avoid a needless conversion here,
104 // TODO OPTIM we could avoid a needless conversion here,
105 // to do when scaffolding for pure Rust switch is removed,
105 // to do when scaffolding for pure Rust switch is removed,
106 // as `get_rev()` currently does the necessary assertions
106 // as `get_rev()` currently does the necessary assertions
107 self.get_rev(py, node).map(|opt| opt.is_some())
107 self.get_rev(py, node).map(|opt| opt.is_some())
108 }
108 }
109
109
110 /// find length of shortest hex nodeid of a binary ID
110 /// find length of shortest hex nodeid of a binary ID
111 def shortest(&self, node: PyBytes) -> PyResult<usize> {
111 def shortest(&self, node: PyBytes) -> PyResult<usize> {
112 let opt = self.get_nodetree(py)?.borrow();
112 let opt = self.get_nodetree(py)?.borrow();
113 let nt = opt.as_ref().unwrap();
113 let nt = opt.as_ref().unwrap();
114 let idx = &*self.index(py).borrow();
114 let idx = &*self.index(py).borrow();
115 match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
115 match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
116 {
116 {
117 Ok(Some(l)) => Ok(l),
117 Ok(Some(l)) => Ok(l),
118 Ok(None) => Err(revlog_error(py)),
118 Ok(None) => Err(revlog_error(py)),
119 Err(e) => Err(nodemap_error(py, e)),
119 Err(e) => Err(nodemap_error(py, e)),
120 }
120 }
121 }
121 }
122
122
123 def partialmatch(&self, node: PyObject) -> PyResult<Option<PyBytes>> {
123 def partialmatch(&self, node: PyObject) -> PyResult<Option<PyBytes>> {
124 let opt = self.get_nodetree(py)?.borrow();
124 let opt = self.get_nodetree(py)?.borrow();
125 let nt = opt.as_ref().unwrap();
125 let nt = opt.as_ref().unwrap();
126 let idx = &*self.index(py).borrow();
126 let idx = &*self.index(py).borrow();
127
127
128 let node_as_string = if cfg!(feature = "python3-sys") {
128 let node_as_string = if cfg!(feature = "python3-sys") {
129 node.cast_as::<PyString>(py)?.to_string(py)?.to_string()
129 node.cast_as::<PyString>(py)?.to_string(py)?.to_string()
130 }
130 }
131 else {
131 else {
132 let node = node.extract::<PyBytes>(py)?;
132 let node = node.extract::<PyBytes>(py)?;
133 String::from_utf8_lossy(node.data(py)).to_string()
133 String::from_utf8_lossy(node.data(py)).to_string()
134 };
134 };
135
135
136 let prefix = NodePrefix::from_hex(&node_as_string)
136 let prefix = NodePrefix::from_hex(&node_as_string)
137 .map_err(|_| PyErr::new::<ValueError, _>(
137 .map_err(|_| PyErr::new::<ValueError, _>(
138 py, format!("Invalid node or prefix '{}'", node_as_string))
138 py, format!("Invalid node or prefix '{}'", node_as_string))
139 )?;
139 )?;
140
140
141 nt.find_bin(idx, prefix)
141 nt.find_bin(idx, prefix)
142 // TODO make an inner API returning the node directly
142 // TODO make an inner API returning the node directly
143 .map(|opt| opt.map(
143 .map(|opt| opt.map(
144 |rev| PyBytes::new(py, idx.node(rev).unwrap().as_bytes())))
144 |rev| PyBytes::new(py, idx.node(rev).unwrap().as_bytes())))
145 .map_err(|e| nodemap_error(py, e))
145 .map_err(|e| nodemap_error(py, e))
146
146
147 }
147 }
148
148
149 /// append an index entry
149 /// append an index entry
150 def append(&self, tup: PyTuple) -> PyResult<PyObject> {
150 def append(&self, tup: PyTuple) -> PyResult<PyObject> {
151 if tup.len(py) < 8 {
151 if tup.len(py) < 8 {
152 // this is better than the panic promised by tup.get_item()
152 // this is better than the panic promised by tup.get_item()
153 return Err(
153 return Err(
154 PyErr::new::<IndexError, _>(py, "tuple index out of range"))
154 PyErr::new::<IndexError, _>(py, "tuple index out of range"))
155 }
155 }
156 let node_bytes = tup.get_item(py, 7).extract(py)?;
156 let node_bytes = tup.get_item(py, 7).extract(py)?;
157 let node = node_from_py_object(py, &node_bytes)?;
157 let node = node_from_py_object(py, &node_bytes)?;
158
158
159 let rev = self.len(py)? as BaseRevision;
159 let rev = self.len(py)? as BaseRevision;
160 let mut idx = self.cindex(py).borrow_mut();
160 let mut idx = self.cindex(py).borrow_mut();
161
161
162 // This is ok since we will just add the revision to the index
162 // This is ok since we will just add the revision to the index
163 let rev = Revision(rev);
163 let rev = Revision(rev);
164 idx.append(py, tup.clone_ref(py))?;
164 idx.append(py, tup.clone_ref(py))?;
165 self.index(py)
165 self.index(py)
166 .borrow_mut()
166 .borrow_mut()
167 .append(py_tuple_to_revision_data_params(py, tup)?)
167 .append(py_tuple_to_revision_data_params(py, tup)?)
168 .unwrap();
168 .unwrap();
169 self.get_nodetree(py)?.borrow_mut().as_mut().unwrap()
169 self.get_nodetree(py)?.borrow_mut().as_mut().unwrap()
170 .insert(&*idx, &node, rev)
170 .insert(&*idx, &node, rev)
171 .map_err(|e| nodemap_error(py, e))?;
171 .map_err(|e| nodemap_error(py, e))?;
172 Ok(py.None())
172 Ok(py.None())
173 }
173 }
174
174
175 def __delitem__(&self, key: PyObject) -> PyResult<()> {
175 def __delitem__(&self, key: PyObject) -> PyResult<()> {
176 // __delitem__ is both for `del idx[r]` and `del idx[r1:r2]`
176 // __delitem__ is both for `del idx[r]` and `del idx[r1:r2]`
177 self.cindex(py).borrow().inner().del_item(py, &key)?;
177 self.cindex(py).borrow().inner().del_item(py, &key)?;
178 let start = key.getattr(py, "start")?;
178 let start = key.getattr(py, "start")?;
179 let start = UncheckedRevision(start.extract(py)?);
179 let start = UncheckedRevision(start.extract(py)?);
180 let start = self.index(py)
180 let start = self.index(py)
181 .borrow()
181 .borrow()
182 .check_revision(start)
182 .check_revision(start)
183 .ok_or_else(|| {
183 .ok_or_else(|| {
184 nodemap_error(py, NodeMapError::RevisionNotInIndex(start))
184 nodemap_error(py, NodeMapError::RevisionNotInIndex(start))
185 })?;
185 })?;
186 self.index(py).borrow_mut().remove(start).unwrap();
186 self.index(py).borrow_mut().remove(start).unwrap();
187 let mut opt = self.get_nodetree(py)?.borrow_mut();
187 let mut opt = self.get_nodetree(py)?.borrow_mut();
188 let nt = opt.as_mut().unwrap();
188 let nt = opt.as_mut().unwrap();
189 nt.invalidate_all();
189 nt.invalidate_all();
190 self.fill_nodemap(py, nt)?;
190 self.fill_nodemap(py, nt)?;
191 Ok(())
191 Ok(())
192 }
192 }
193
193
194 //
194 //
195 // Reforwarded C index API
195 // Reforwarded C index API
196 //
196 //
197
197
198 // index_methods (tp_methods). Same ordering as in revlog.c
198 // index_methods (tp_methods). Same ordering as in revlog.c
199
199
200 /// return the gca set of the given revs
200 /// return the gca set of the given revs
201 def ancestors(&self, *args, **kw) -> PyResult<PyObject> {
201 def ancestors(&self, *args, **kw) -> PyResult<PyObject> {
202 let rust_res = self.inner_ancestors(py, args)?;
202 let rust_res = self.inner_ancestors(py, args)?;
203
203
204 let c_res = self.call_cindex(py, "ancestors", args, kw)?;
204 let c_res = self.call_cindex(py, "ancestors", args, kw)?;
205 // the algorithm should always provide the results in reverse ordering
205 // the algorithm should always provide the results in reverse ordering
206 assert_py_eq(py, "ancestors", &rust_res, &c_res)?;
206 assert_py_eq(py, "ancestors", &rust_res, &c_res)?;
207
207
208 Ok(rust_res)
208 Ok(rust_res)
209 }
209 }
210
210
211 /// return the heads of the common ancestors of the given revs
211 /// return the heads of the common ancestors of the given revs
212 def commonancestorsheads(&self, *args, **kw) -> PyResult<PyObject> {
212 def commonancestorsheads(&self, *args, **kw) -> PyResult<PyObject> {
213 let rust_res = self.inner_commonancestorsheads(py, args)?;
213 let rust_res = self.inner_commonancestorsheads(py, args)?;
214
214
215 let c_res = self.call_cindex(py, "commonancestorsheads", args, kw)?;
215 let c_res = self.call_cindex(py, "commonancestorsheads", args, kw)?;
216 // the algorithm should always provide the results in reverse ordering
216 // the algorithm should always provide the results in reverse ordering
217 assert_py_eq(py, "commonancestorsheads", &rust_res, &c_res)?;
217 assert_py_eq(py, "commonancestorsheads", &rust_res, &c_res)?;
218
218
219 Ok(rust_res)
219 Ok(rust_res)
220 }
220 }
221
221
    /// Clear the index caches and inner py_class data.
    /// It is Python's responsibility to call `update_nodemap_data` again.
    def clearcaches(&self, *args, **kw) -> PyResult<PyObject> {
        // Drop the nodetree, the nodemap docket and the mmap'ed nodemap
        // data, then clear the Rust index caches before forwarding the
        // call to the C index.
        self.nt(py).borrow_mut().take();
        self.docket(py).borrow_mut().take();
        self.nodemap_mmap(py).borrow_mut().take();
        self.index(py).borrow_mut().clear_caches();
        self.call_cindex(py, "clearcaches", args, kw)
    }
231
231
    /// return the raw binary string representing a revision
    def entry_binary(&self, *args, **kw) -> PyResult<PyObject> {
        let rindex = self.index(py).borrow();
        let rev = UncheckedRevision(args.get_item(py, 0).extract(py)?);
        // `check_revision` validates the revision number first, so an
        // out-of-range revision surfaces as a Python error, not a panic.
        let rust_bytes = rindex.check_revision(rev).and_then(
            |r| rindex.entry_binary(r))
            .ok_or_else(|| rev_not_in_index(py, rev))?;
        let rust_res = PyBytes::new(py, rust_bytes).into_object();

        // Cross-check against the C implementation before returning.
        let c_res = self.call_cindex(py, "entry_binary", args, kw)?;
        assert_py_eq(py, "entry_binary", &rust_res, &c_res)?;
        Ok(rust_res)
    }
245
245
    /// return a binary packed version of the header
    def pack_header(&self, *args, **kw) -> PyResult<PyObject> {
        let rindex = self.index(py).borrow();
        // First argument is the header value to pack.
        let packed = rindex.pack_header(args.get_item(py, 0).extract(py)?);
        let rust_res = PyBytes::new(py, &packed).into_object();

        // Cross-check against the C implementation before returning.
        let c_res = self.call_cindex(py, "pack_header", args, kw)?;
        assert_py_eq(py, "pack_header", &rust_res, &c_res)?;
        Ok(rust_res)
    }
256
256
    /// compute phases
    def computephasesmapsets(&self, *args, **kw) -> PyResult<PyObject> {
        // First argument is a dict of phase roots.
        let py_roots = args.get_item(py, 0).extract::<PyDict>(py)?;
        let rust_res = self.inner_computephasesmapsets(py, py_roots)?;

        // Cross-check against the C implementation before returning.
        let c_res = self.call_cindex(py, "computephasesmapsets", args, kw)?;
        assert_py_eq(py, "computephasesmapsets", &rust_res, &c_res)?;
        Ok(rust_res)
    }
266
266
    /// reachableroots
    def reachableroots2(&self, *args, **kw) -> PyResult<PyObject> {
        // Arguments are presumably (minroot, heads, roots, includepath),
        // mirroring the C `reachableroots2` — TODO confirm against
        // revlog.c / the callers.
        let rust_res = self.inner_reachableroots2(
            py,
            UncheckedRevision(args.get_item(py, 0).extract(py)?),
            args.get_item(py, 1),
            args.get_item(py, 2),
            args.get_item(py, 3).extract(py)?,
        )?;

        let c_res = self.call_cindex(py, "reachableroots2", args, kw)?;
        // ordering of C result depends on how the computation went, and
        // Rust result ordering is arbitrary. Hence we compare after
        // sorting the results (in Python to avoid reconverting everything
        // back to Rust structs).
        assert_py_eq_normalized(py, "reachableroots2", &rust_res, &c_res,
                                |v| format!("sorted({})", v))?;

        Ok(rust_res)
    }
287
287
    /// get head revisions
    def headrevs(&self, *args, **kw) -> PyResult<PyObject> {
        let rust_res = self.inner_headrevs(py)?;

        // Cross-check against the C implementation before returning.
        let c_res = self.call_cindex(py, "headrevs", args, kw)?;
        assert_py_eq(py, "headrevs", &rust_res, &c_res)?;
        Ok(rust_res)
    }
296
296
    /// get filtered head revisions
    def headrevsfiltered(&self, *args, **kw) -> PyResult<PyObject> {
        // First argument is presumably the collection of filtered
        // revisions — TODO confirm against the Python callers.
        let rust_res = self.inner_headrevsfiltered(py, &args.get_item(py, 0))?;
        let c_res = self.call_cindex(py, "headrevsfiltered", args, kw)?;

        // Cross-check against the C implementation before returning.
        assert_py_eq(py, "headrevsfiltered", &rust_res, &c_res)?;
        Ok(rust_res)
    }
305
305
    /// True if the object is a snapshot
    def issnapshot(&self, *args, **kw) -> PyResult<bool> {
        let index = self.index(py).borrow();
        // Errors from the Rust side (e.g. invalid revision) become a
        // Python ValueError.
        let result = index
            .is_snapshot(UncheckedRevision(args.get_item(py, 0).extract(py)?))
            .map_err(|e| {
                PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
            })?;
        // Cross-check against the C implementation before returning.
        let cresult = self.call_cindex(py, "issnapshot", args, kw)?;
        assert_eq!(result, cresult.extract(py)?);
        Ok(result)
    }
318
318
    /// Gather snapshot data in a cache dict
    def findsnapshots(&self, *args, **kw) -> PyResult<PyObject> {
        let index = self.index(py).borrow();
        let cache: PyDict = args.get_item(py, 0).extract(py)?;
        // this methods operates by setting new values in the cache,
        // hence we will compare results by letting the C implementation
        // operate over a deepcopy of the cache, and finally compare both
        // caches.
        let c_cache = PyDict::new(py);
        for (k, v) in cache.items(py) {
            c_cache.set_item(py, k, PySet::new(py, v)?)?;
        }

        let start_rev = UncheckedRevision(args.get_item(py, 1).extract(py)?);
        let end_rev = UncheckedRevision(args.get_item(py, 2).extract(py)?);
        // Run the Rust implementation against the original cache...
        let mut cache_wrapper = PySnapshotsCache{ py, dict: cache };
        index.find_snapshots(
            start_rev,
            end_rev,
            &mut cache_wrapper,
        ).map_err(|_| revlog_error(py))?;

        // ...and the C implementation against the copy, then compare
        // the two caches.
        let c_args = PyTuple::new(
            py,
            &[
                c_cache.clone_ref(py).into_object(),
                args.get_item(py, 1),
                args.get_item(py, 2)
            ]
        );
        self.call_cindex(py, "findsnapshots", &c_args, kw)?;
        assert_py_eq(py, "findsnapshots cache",
                     &cache_wrapper.into_object(),
                     &c_cache.into_object())?;
        Ok(py.None())
    }
355
355
    /// determine revisions with deltas to reconstruct fulltext
    def deltachain(&self, *args, **kw) -> PyResult<PyObject> {
        let index = self.index(py).borrow();
        let rev = args.get_item(py, 0).extract::<BaseRevision>(py)?.into();
        // Second argument is an optional revision at which to stop
        // following the chain.
        let stop_rev =
            args.get_item(py, 1).extract::<Option<BaseRevision>>(py)?;
        // Validate both revisions before running the chain computation.
        let rev = index.check_revision(rev).ok_or_else(|| {
            nodemap_error(py, NodeMapError::RevisionNotInIndex(rev))
        })?;
        let stop_rev = if let Some(stop_rev) = stop_rev {
            let stop_rev = UncheckedRevision(stop_rev);
            Some(index.check_revision(stop_rev).ok_or_else(|| {
                nodemap_error(py, NodeMapError::RevisionNotInIndex(stop_rev))
            })?)
        } else {None};
        let (chain, stopped) = index.delta_chain(rev, stop_rev).map_err(|e| {
            PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
        })?;

        // Cross-check against the C implementation: both the chain
        // itself and the "stopped" flag must match.
        let cresult = self.call_cindex(py, "deltachain", args, kw)?;
        let cchain: Vec<BaseRevision> =
            cresult.get_item(py, 0)?.extract::<Vec<BaseRevision>>(py)?;
        let chain: Vec<_> = chain.into_iter().map(|r| r.0).collect();
        assert_eq!(chain, cchain);
        assert_eq!(stopped, cresult.get_item(py, 1)?.extract(py)?);

        // Return a (chain, stopped) tuple, like the C implementation.
        Ok(
            PyTuple::new(
                py,
                &[
                    chain.into_py_object(py).into_object(),
                    stopped.into_py_object(py).into_object()
                ]
            ).into_object()
        )

    }
393
393
    /// slice planned chunk read to reach a density threshold
    def slicechunktodensity(&self, *args, **kw) -> PyResult<PyObject> {
        // Arguments are presumably (revs, targetdensity, mingapsize) —
        // TODO confirm against the Python callers.
        let rust_res = self.inner_slicechunktodensity(
            py,
            args.get_item(py, 0),
            args.get_item(py, 1).extract(py)?,
            args.get_item(py, 2).extract(py)?
        )?;

        // Cross-check against the C implementation before returning.
        let c_res = self.call_cindex(py, "slicechunktodensity", args, kw)?;
        assert_py_eq(py, "slicechunktodensity", &rust_res, &c_res)?;
        Ok(rust_res)
    }
407
407
    // index_sequence_methods and index_mapping_methods.
    //
    // Since we call back through the high level Python API,
    // there's no point making a distinction between index_get
    // and index_getitem.
    // gracinet 2023: this above is no longer true for the pure Rust impl
414
414
    /// Length of the index; delegates to `len`, which also asserts
    /// that the Rust and C indexes agree.
    def __len__(&self) -> PyResult<usize> {
        self.len(py)
    }
418
418
    /// Item access, accepting either a revision number or (per the
    /// note below) a binary node id.
    def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
        let rust_res = self.inner_getitem(py, key.clone_ref(py))?;

        // this conversion seems needless, but that's actually because
        // `index_getitem` does not handle conversion from PyLong,
        // which expressions such as [e for e in index] internally use.
        // Note that we don't seem to have a direct way to call
        // PySequence_GetItem (does the job), which would possibly be better
        // for performance
        // gracinet 2023: the above comment can be removed when we use
        // the pure Rust impl only. Note also that `key` can be a binary
        // node id.
        let c_key = match key.extract::<BaseRevision>(py) {
            Ok(rev) => rev.to_py_object(py).into_object(),
            Err(_) => key,
        };
        let c_res = self.cindex(py).borrow().inner().get_item(py, c_key)?;

        // Cross-check against the C implementation before returning.
        assert_py_eq(py, "__getitem__", &rust_res, &c_res)?;
        Ok(rust_res)
    }
440
440
    /// Membership test for either a revision number or a binary node id.
    def __contains__(&self, item: PyObject) -> PyResult<bool> {
        // ObjectProtocol does not seem to provide contains(), so
        // this is an equivalent implementation of the index_contains()
        // defined in revlog.c
        let cindex = self.cindex(py).borrow();
        match item.extract::<i32>(py) {
            Ok(rev) => {
                // Revision number: a simple range check (-1 being the
                // null revision, which is always "contained").
                Ok(rev >= -1 && rev < self.len(py)? as BaseRevision)
            }
            Err(_) => {
                // Not an int: treat the item as a binary node id and
                // check that the Rust nodemap and the C index agree.
                let item_bytes: PyBytes = item.extract(py)?;
                let rust_res = self.has_node(py, item_bytes)?;

                let c_res = cindex.inner().call_method(
                    py,
                    "has_node",
                    PyTuple::new(py, &[item.clone_ref(py)]),
                    None)?
                    .extract(py)?;

                assert_eq!(rust_res, c_res);
                Ok(rust_res)
            }
        }
    }
466
466
    /// Return the full nodemap bytes to be written as-is to disk.
    def nodemap_data_all(&self) -> PyResult<PyBytes> {
        self.inner_nodemap_data_all(py)
    }
470
470
    /// Return the last saved docket along with the size of any changed
    /// data (in number of blocks), and said data as bytes.
    def nodemap_data_incremental(&self) -> PyResult<PyObject> {
        self.inner_nodemap_data_incremental(py)
    }
    /// Set up the nodemap from the given persisted `docket` and raw
    /// nodemap data `nm_data`.
    def update_nodemap_data(
        &self,
        docket: PyObject,
        nm_data: PyObject
    ) -> PyResult<PyObject> {
        // Delegates to inner_update_nodemap_data (not in view here).
        self.inner_update_nodemap_data(py, docket, nm_data)
    }
481
481
    @property
    def entry_size(&self) -> PyResult<PyInt> {
        // Constant on the Rust side; still cross-checked against the
        // C index attribute of the same name.
        let rust_res: PyInt = INDEX_ENTRY_SIZE.to_py_object(py);

        let c_res = self.cindex(py).borrow().inner()
            .getattr(py, "entry_size")?;
        assert_py_eq(py, "entry_size", rust_res.as_object(), &c_res)?;

        Ok(rust_res)
    }
492
492
    @property
    def rust_ext_compat(&self) -> PyResult<PyInt> {
        // will be entirely removed when the Rust index yet useful to
        // implement in Rust to detangle things when removing `self.cindex`
        let rust_res: PyInt = 1.to_py_object(py);

        // Cross-check against the C index attribute of the same name.
        let c_res = self.cindex(py).borrow().inner()
            .getattr(py, "rust_ext_compat")?;
        assert_py_eq(py, "rust_ext_compat", rust_res.as_object(), &c_res)?;

        Ok(rust_res)
    }
505
505
506 });
506 });
507
507
508 /// Take a (potentially) mmap'ed buffer, and return the underlying Python
508 /// Take a (potentially) mmap'ed buffer, and return the underlying Python
509 /// buffer along with the Rust slice into said buffer. We need to keep the
509 /// buffer along with the Rust slice into said buffer. We need to keep the
510 /// Python buffer around, otherwise we'd get a dangling pointer once the buffer
510 /// Python buffer around, otherwise we'd get a dangling pointer once the buffer
511 /// is freed from Python's side.
511 /// is freed from Python's side.
512 ///
512 ///
513 /// # Safety
513 /// # Safety
514 ///
514 ///
515 /// The caller must make sure that the buffer is kept around for at least as
515 /// The caller must make sure that the buffer is kept around for at least as
516 /// long as the slice.
516 /// long as the slice.
517 #[deny(unsafe_op_in_unsafe_fn)]
517 #[deny(unsafe_op_in_unsafe_fn)]
518 unsafe fn mmap_keeparound(
518 unsafe fn mmap_keeparound(
519 py: Python,
519 py: Python,
520 data: PyObject,
520 data: PyObject,
521 ) -> PyResult<(
521 ) -> PyResult<(
522 PyBuffer,
522 PyBuffer,
523 Box<dyn std::ops::Deref<Target = [u8]> + Send + 'static>,
523 Box<dyn std::ops::Deref<Target = [u8]> + Send + Sync + 'static>,
524 )> {
524 )> {
525 let buf = PyBuffer::get(py, &data)?;
525 let buf = PyBuffer::get(py, &data)?;
526 let len = buf.item_count();
526 let len = buf.item_count();
527
527
528 // Build a slice from the mmap'ed buffer data
528 // Build a slice from the mmap'ed buffer data
529 let cbuf = buf.buf_ptr();
529 let cbuf = buf.buf_ptr();
530 let bytes = if std::mem::size_of::<u8>() == buf.item_size()
530 let bytes = if std::mem::size_of::<u8>() == buf.item_size()
531 && buf.is_c_contiguous()
531 && buf.is_c_contiguous()
532 && u8::is_compatible_format(buf.format())
532 && u8::is_compatible_format(buf.format())
533 {
533 {
534 unsafe { std::slice::from_raw_parts(cbuf as *const u8, len) }
534 unsafe { std::slice::from_raw_parts(cbuf as *const u8, len) }
535 } else {
535 } else {
536 return Err(PyErr::new::<ValueError, _>(
536 return Err(PyErr::new::<ValueError, _>(
537 py,
537 py,
538 "Nodemap data buffer has an invalid memory representation"
538 "Nodemap data buffer has an invalid memory representation"
539 .to_string(),
539 .to_string(),
540 ));
540 ));
541 };
541 };
542
542
543 Ok((buf, Box::new(bytes)))
543 Ok((buf, Box::new(bytes)))
544 }
544 }
545
545
/// Build a `RevisionDataParams` from the classical 8+-item Python index
/// entry tuple.
///
/// Item 0 packs the data offset and the flags (flags in the low 16 bits);
/// item 7 is the binary node id. Tuples shorter than 8 items yield an
/// `IndexError` instead of a panic.
fn py_tuple_to_revision_data_params(
    py: Python,
    tuple: PyTuple,
) -> PyResult<RevisionDataParams> {
    if tuple.len(py) < 8 {
        // this is better than the panic promised by tup.get_item()
        return Err(PyErr::new::<IndexError, _>(
            py,
            "tuple index out of range",
        ));
    }
    let offset_or_flags: u64 = tuple.get_item(py, 0).extract(py)?;
    let node_id = tuple
        .get_item(py, 7)
        .extract::<PyBytes>(py)?
        .data(py)
        .try_into()
        .unwrap();
    // Split the packed first field: low 16 bits are the flags, the rest
    // is the data offset.
    let flags = (offset_or_flags & 0xFFFF) as u16;
    let data_offset = offset_or_flags >> 16;
    Ok(RevisionDataParams {
        flags,
        data_offset,
        data_compressed_length: tuple.get_item(py, 1).extract(py)?,
        data_uncompressed_length: tuple.get_item(py, 2).extract(py)?,
        data_delta_base: tuple.get_item(py, 3).extract(py)?,
        link_rev: tuple.get_item(py, 4).extract(py)?,
        parent_rev_1: tuple.get_item(py, 5).extract(py)?,
        parent_rev_2: tuple.get_item(py, 6).extract(py)?,
        node_id,
        // Remaining fields (sidedata, compression modes, rank) keep
        // their defaults.
        ..Default::default()
    })
}
/// Convert a `RevisionDataParams` back into the Python index entry tuple,
/// in the field order the Python/C side expects (inverse of
/// `py_tuple_to_revision_data_params`, plus the sidedata/compression/rank
/// trailing fields).
fn revision_data_params_to_py_tuple(
    py: Python,
    params: RevisionDataParams,
) -> PyTuple {
    PyTuple::new(
        py,
        &[
            params.data_offset.into_py_object(py).into_object(),
            params
                .data_compressed_length
                .into_py_object(py)
                .into_object(),
            params
                .data_uncompressed_length
                .into_py_object(py)
                .into_object(),
            params.data_delta_base.into_py_object(py).into_object(),
            params.link_rev.into_py_object(py).into_object(),
            params.parent_rev_1.into_py_object(py).into_object(),
            params.parent_rev_2.into_py_object(py).into_object(),
            PyBytes::new(py, &params.node_id)
                .into_py_object(py)
                .into_object(),
            params._sidedata_offset.into_py_object(py).into_object(),
            params
                ._sidedata_compressed_length
                .into_py_object(py)
                .into_object(),
            params
                .data_compression_mode
                .into_py_object(py)
                .into_object(),
            params
                ._sidedata_compression_mode
                .into_py_object(py)
                .into_object(),
            params._rank.into_py_object(py).into_object(),
        ],
    )
}
619
619
/// Wrapper around a Python dict so the pure Rust `find_snapshots` can
/// fill a Python-side snapshots cache through the `SnapshotsCache` trait.
struct PySnapshotsCache<'p> {
    // GIL token, needed for every operation on `dict`.
    py: Python<'p>,
    // The cache: maps a base revision to a Python set of revisions
    // (see `insert_for` below).
    dict: PyDict,
}
624
624
impl<'p> PySnapshotsCache<'p> {
    /// Consume the wrapper and return the underlying dict as a PyObject.
    fn into_object(self) -> PyObject {
        self.dict.into_object()
    }
}
630
630
impl<'p> SnapshotsCache for PySnapshotsCache<'p> {
    /// Record `value` in the set keyed by `rev`, creating that set on
    /// first insertion.
    fn insert_for(
        &mut self,
        rev: BaseRevision,
        value: BaseRevision,
    ) -> Result<(), RevlogError> {
        let pyvalue = value.into_py_object(self.py).into_object();
        match self.dict.get_item(self.py, rev) {
            // Existing entry: add to the set in place.
            Some(obj) => obj
                .extract::<PySet>(self.py)
                .and_then(|set| set.add(self.py, pyvalue)),
            // No entry yet: create a fresh one-element set.
            None => PySet::new(self.py, vec![pyvalue])
                .and_then(|set| self.dict.set_item(self.py, rev, set)),
        }
        // Any Python-side failure is folded into a single opaque error:
        // callers only need to know the cache update failed.
        .map_err(|_| {
            RevlogError::Other(HgError::unsupported(
                "Error in Python caches handling",
            ))
        })
    }
}
652
652
653 impl MixedIndex {
653 impl MixedIndex {
    /// Build a `MixedIndex` holding both the C index object and a pure
    /// Rust index parsed from the same (possibly mmap'ed) `data`.
    fn new(
        py: Python,
        cindex: PyObject,
        data: PyObject,
        header: u32,
    ) -> PyResult<MixedIndex> {
        // Safety: we keep the buffer around inside the class as `index_mmap`
        let (buf, bytes) = unsafe { mmap_keeparound(py, data)? };

        Self::create_instance(
            py,
            RefCell::new(cindex::Index::new(py, cindex)?),
            RefCell::new(
                hg::index::Index::new(
                    bytes,
                    IndexHeader::parse(&header.to_be_bytes())
                        .expect("default header is broken")
                        .unwrap(),
                )
                .map_err(|e| {
                    revlog_error_with_msg(py, e.to_string().as_bytes())
                })?,
            ),
            // The three empty slots are presumably nt / docket /
            // nodemap_mmap (cf. `clearcaches`); they are filled lazily —
            // confirm against the py_class! data declaration.
            RefCell::new(None),
            RefCell::new(None),
            RefCell::new(None),
            // Keeps the Python buffer alive as long as `bytes` above.
            RefCell::new(Some(buf)),
        )
    }
683
683
684 fn len(&self, py: Python) -> PyResult<usize> {
684 fn len(&self, py: Python) -> PyResult<usize> {
685 let rust_index_len = self.index(py).borrow().len();
685 let rust_index_len = self.index(py).borrow().len();
686 let cindex_len = self.cindex(py).borrow().inner().len(py)?;
686 let cindex_len = self.cindex(py).borrow().inner().len(py)?;
687 assert_eq!(rust_index_len, cindex_len);
687 assert_eq!(rust_index_len, cindex_len);
688 Ok(rust_index_len)
688 Ok(rust_index_len)
689 }
689 }
690
690
    /// This is scaffolding at this point, but it could also become
    /// a way to start a persistent nodemap or perform a
    /// vacuum / repack operation
    fn fill_nodemap(
        &self,
        py: Python,
        nt: &mut NodeTree,
    ) -> PyResult<PyObject> {
        let index = self.index(py).borrow();
        // Insert every revision's node into the tree, in revision order.
        for r in 0..self.len(py)? {
            let rev = Revision(r as BaseRevision);
            // in this case node() won't ever return None
            nt.insert(&*index, index.node(rev).unwrap(), rev)
                .map_err(|e| nodemap_error(py, e))?
        }
        Ok(py.None())
    }
708
708
    /// Return the nodetree, building and filling it from the index on
    /// first access.
    fn get_nodetree<'a>(
        &'a self,
        py: Python<'a>,
    ) -> PyResult<&'a RefCell<Option<NodeTree>>> {
        if self.nt(py).borrow().is_none() {
            // Lazy initialization: start from an empty mutable tree and
            // populate it from the index.
            let readonly = Box::<Vec<_>>::default();
            let mut nt = NodeTree::load_bytes(readonly, 0);
            self.fill_nodemap(py, &mut nt)?;
            self.nt(py).borrow_mut().replace(nt);
        }
        Ok(self.nt(py))
    }
721
721
    /// forward a method call to the underlying C index
    fn call_cindex(
        &self,
        py: Python,
        name: &str,
        args: &PyTuple,
        kwargs: Option<&PyDict>,
    ) -> PyResult<PyObject> {
        // Plain delegation: the C index object handles the actual call.
        self.cindex(py)
            .borrow()
            .inner()
            .call_method(py, name, args, kwargs)
    }
735
735
    /// Return a new reference to the wrapped C index.
    pub fn clone_cindex(&self, py: Python) -> cindex::Index {
        self.cindex(py).borrow().clone_ref(py)
    }
739
739
    /// Returns the full nodemap bytes to be written as-is to disk
    ///
    /// NOTE(review): this *takes* the cached nodetree out of `self.nt`
    /// (leaving `None` behind); a later `get_nodetree` call will rebuild it.
    fn inner_nodemap_data_all(&self, py: Python) -> PyResult<PyBytes> {
        let nodemap = self.get_nodetree(py)?.borrow_mut().take().unwrap();
        let (readonly, bytes) = nodemap.into_readonly_and_added_bytes();

        // If there's anything readonly, we need to build the data again from
        // scratch
        let bytes = if readonly.len() > 0 {
            let mut nt = NodeTree::load_bytes(Box::<Vec<_>>::default(), 0);
            self.fill_nodemap(py, &mut nt)?;

            let (readonly, bytes) = nt.into_readonly_and_added_bytes();
            // A tree built from scratch must be fully held in added bytes.
            assert_eq!(readonly.len(), 0);

            bytes
        } else {
            bytes
        };

        let bytes = PyBytes::new(py, &bytes);
        Ok(bytes)
    }
762
762
    /// Returns the last saved docket along with the size of any changed data
    /// (in number of blocks), and said data as bytes.
    ///
    /// NOTE(review): like `inner_nodemap_data_all`, this consumes the cached
    /// nodetree (`take()`), leaving `None` in its place.
    fn inner_nodemap_data_incremental(
        &self,
        py: Python,
    ) -> PyResult<PyObject> {
        let docket = self.docket(py).borrow();
        let docket = match docket.as_ref() {
            Some(d) => d,
            // No stored docket: nothing incremental to report.
            None => return Ok(py.None()),
        };

        let node_tree = self.get_nodetree(py)?.borrow_mut().take().unwrap();
        let masked_blocks = node_tree.masked_readonly_blocks();
        let (_, data) = node_tree.into_readonly_and_added_bytes();
        // Changed size is reported in bytes: number of touched readonly
        // blocks times the on-disk size of one block.
        let changed = masked_blocks * std::mem::size_of::<Block>();

        Ok((docket, changed, PyBytes::new(py, &data))
            .to_py_object(py)
            .into_object())
    }
784
784
    /// Update the nodemap from the new (mmaped) data.
    /// The docket is kept as a reference for later incremental calls.
    fn inner_update_nodemap_data(
        &self,
        py: Python,
        docket: PyObject,
        nm_data: PyObject,
    ) -> PyResult<PyObject> {
        // Safety: we keep the buffer around inside the class as `nodemap_mmap`
        let (buf, bytes) = unsafe { mmap_keeparound(py, nm_data)? };
        let len = buf.item_count();
        self.nodemap_mmap(py).borrow_mut().replace(buf);

        let mut nt = NodeTree::load_bytes(bytes, len);

        // Revision up to which the on-disk nodemap data is valid, per the
        // docket's `tip_rev` attribute.
        let data_tip = docket
            .getattr(py, "tip_rev")?
            .extract::<BaseRevision>(py)?
            .into();
        self.docket(py).borrow_mut().replace(docket.clone_ref(py));
        let idx = self.index(py).borrow();
        let data_tip = idx.check_revision(data_tip).ok_or_else(|| {
            nodemap_error(py, NodeMapError::RevisionNotInIndex(data_tip))
        })?;
        let current_tip = idx.len();

        // Catch the nodetree up with revisions added to the index after the
        // nodemap data was last written to disk.
        for r in (data_tip.0 + 1)..current_tip as BaseRevision {
            let rev = Revision(r);
            // in this case node() won't ever return None
            nt.insert(&*idx, idx.node(rev).unwrap(), rev)
                .map_err(|e| nodemap_error(py, e))?
        }

        *self.nt(py).borrow_mut() = Some(nt);

        Ok(py.None())
    }
822
822
823 fn inner_getitem(&self, py: Python, key: PyObject) -> PyResult<PyObject> {
823 fn inner_getitem(&self, py: Python, key: PyObject) -> PyResult<PyObject> {
824 let idx = self.index(py).borrow();
824 let idx = self.index(py).borrow();
825 Ok(match key.extract::<BaseRevision>(py) {
825 Ok(match key.extract::<BaseRevision>(py) {
826 Ok(key_as_int) => {
826 Ok(key_as_int) => {
827 let entry_params = if key_as_int == NULL_REVISION.0 {
827 let entry_params = if key_as_int == NULL_REVISION.0 {
828 RevisionDataParams::default()
828 RevisionDataParams::default()
829 } else {
829 } else {
830 let rev = UncheckedRevision(key_as_int);
830 let rev = UncheckedRevision(key_as_int);
831 match idx.entry_as_params(rev) {
831 match idx.entry_as_params(rev) {
832 Some(e) => e,
832 Some(e) => e,
833 None => {
833 None => {
834 return Err(PyErr::new::<IndexError, _>(
834 return Err(PyErr::new::<IndexError, _>(
835 py,
835 py,
836 "revlog index out of range",
836 "revlog index out of range",
837 ));
837 ));
838 }
838 }
839 }
839 }
840 };
840 };
841 revision_data_params_to_py_tuple(py, entry_params)
841 revision_data_params_to_py_tuple(py, entry_params)
842 .into_object()
842 .into_object()
843 }
843 }
844 _ => self.get_rev(py, key.extract::<PyBytes>(py)?)?.map_or_else(
844 _ => self.get_rev(py, key.extract::<PyBytes>(py)?)?.map_or_else(
845 || py.None(),
845 || py.None(),
846 |py_rev| py_rev.into_py_object(py).into_object(),
846 |py_rev| py_rev.into_py_object(py).into_object(),
847 ),
847 ),
848 })
848 })
849 }
849 }
850
850
851 fn inner_headrevs(&self, py: Python) -> PyResult<PyObject> {
851 fn inner_headrevs(&self, py: Python) -> PyResult<PyObject> {
852 let index = &mut *self.index(py).borrow_mut();
852 let index = &mut *self.index(py).borrow_mut();
853 let as_vec: Vec<PyObject> = index
853 let as_vec: Vec<PyObject> = index
854 .head_revs()
854 .head_revs()
855 .map_err(|e| graph_error(py, e))?
855 .map_err(|e| graph_error(py, e))?
856 .iter()
856 .iter()
857 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
857 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
858 .collect();
858 .collect();
859 Ok(PyList::new(py, &as_vec).into_object())
859 Ok(PyList::new(py, &as_vec).into_object())
860 }
860 }
861
861
862 fn inner_headrevsfiltered(
862 fn inner_headrevsfiltered(
863 &self,
863 &self,
864 py: Python,
864 py: Python,
865 filtered_revs: &PyObject,
865 filtered_revs: &PyObject,
866 ) -> PyResult<PyObject> {
866 ) -> PyResult<PyObject> {
867 let index = &mut *self.index(py).borrow_mut();
867 let index = &mut *self.index(py).borrow_mut();
868 let filtered_revs = rev_pyiter_collect(py, filtered_revs, index)?;
868 let filtered_revs = rev_pyiter_collect(py, filtered_revs, index)?;
869
869
870 let as_vec: Vec<PyObject> = index
870 let as_vec: Vec<PyObject> = index
871 .head_revs_filtered(&filtered_revs)
871 .head_revs_filtered(&filtered_revs)
872 .map_err(|e| graph_error(py, e))?
872 .map_err(|e| graph_error(py, e))?
873 .iter()
873 .iter()
874 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
874 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
875 .collect();
875 .collect();
876 Ok(PyList::new(py, &as_vec).into_object())
876 Ok(PyList::new(py, &as_vec).into_object())
877 }
877 }
878
878
879 fn inner_ancestors(
879 fn inner_ancestors(
880 &self,
880 &self,
881 py: Python,
881 py: Python,
882 py_revs: &PyTuple,
882 py_revs: &PyTuple,
883 ) -> PyResult<PyObject> {
883 ) -> PyResult<PyObject> {
884 let index = &mut *self.index(py).borrow_mut();
884 let index = &mut *self.index(py).borrow_mut();
885 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
885 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
886 let as_vec: Vec<_> = index
886 let as_vec: Vec<_> = index
887 .ancestors(&revs)
887 .ancestors(&revs)
888 .map_err(|e| graph_error(py, e))?
888 .map_err(|e| graph_error(py, e))?
889 .iter()
889 .iter()
890 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
890 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
891 .collect();
891 .collect();
892 Ok(PyList::new(py, &as_vec).into_object())
892 Ok(PyList::new(py, &as_vec).into_object())
893 }
893 }
894
894
895 fn inner_commonancestorsheads(
895 fn inner_commonancestorsheads(
896 &self,
896 &self,
897 py: Python,
897 py: Python,
898 py_revs: &PyTuple,
898 py_revs: &PyTuple,
899 ) -> PyResult<PyObject> {
899 ) -> PyResult<PyObject> {
900 let index = &mut *self.index(py).borrow_mut();
900 let index = &mut *self.index(py).borrow_mut();
901 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
901 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
902 let as_vec: Vec<_> = index
902 let as_vec: Vec<_> = index
903 .common_ancestor_heads(&revs)
903 .common_ancestor_heads(&revs)
904 .map_err(|e| graph_error(py, e))?
904 .map_err(|e| graph_error(py, e))?
905 .iter()
905 .iter()
906 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
906 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
907 .collect();
907 .collect();
908 Ok(PyList::new(py, &as_vec).into_object())
908 Ok(PyList::new(py, &as_vec).into_object())
909 }
909 }
910
910
    /// Compute phase maps/sets from Python roots (`{phase_num: [node]}`).
    ///
    /// Nodes are resolved to revisions here, where the nodemap is at hand,
    /// before delegating to `index.compute_phases_map_sets`.
    fn inner_computephasesmapsets(
        &self,
        py: Python,
        py_roots: PyDict,
    ) -> PyResult<PyObject> {
        let index = &*self.index(py).borrow();
        let opt = self.get_nodetree(py)?.borrow();
        // `get_nodetree` guarantees the option is filled at this point.
        let nt = opt.as_ref().unwrap();
        let roots: Result<HashMap<Phase, Vec<Revision>>, PyErr> = py_roots
            .items_list(py)
            .iter(py)
            .map(|r| {
                let phase = r.get_item(py, 0)?;
                let nodes = r.get_item(py, 1)?;
                // Transform the nodes from Python to revs here since we
                // have access to the nodemap
                let revs: Result<_, _> = nodes
                    .iter(py)?
                    .map(|node| match node?.extract::<PyBytes>(py) {
                        Ok(py_bytes) => {
                            let node = node_from_py_bytes(py, &py_bytes)?;
                            // An unknown node is reported as a RevlogError.
                            nt.find_bin(index, node.into())
                                .map_err(|e| nodemap_error(py, e))?
                                .ok_or_else(|| revlog_error(py))
                        }
                        Err(e) => Err(e),
                    })
                    .collect();
                let phase = Phase::try_from(phase.extract::<usize>(py)?)
                    .map_err(|_| revlog_error(py));
                Ok((phase?, revs?))
            })
            .collect();
        let (len, phase_maps) = index
            .compute_phases_map_sets(roots?)
            .map_err(|e| graph_error(py, e))?;

        // Ugly hack, but temporary
        const IDX_TO_PHASE_NUM: [usize; 4] = [1, 2, 32, 96];
        let py_phase_maps = PyDict::new(py);
        for (idx, roots) in phase_maps.iter().enumerate() {
            let phase_num = IDX_TO_PHASE_NUM[idx].into_py_object(py);
            // OPTIM too bad we have to collect here. At least, we could
            // reuse the same Vec and allocate it with capacity at
            // max(len(phase_maps)
            let roots_vec: Vec<PyInt> = roots
                .iter()
                .map(|r| PyRevision::from(*r).into_py_object(py))
                .collect();
            py_phase_maps.set_item(
                py,
                phase_num,
                PySet::new(py, roots_vec)?,
            )?;
        }
        // Final result is a `(len, {phase_num: set(revs)})` tuple.
        Ok(PyTuple::new(
            py,
            &[
                len.into_py_object(py).into_object(),
                py_phase_maps.into_object(),
            ],
        )
        .into_object())
    }
975
975
976 fn inner_slicechunktodensity(
976 fn inner_slicechunktodensity(
977 &self,
977 &self,
978 py: Python,
978 py: Python,
979 revs: PyObject,
979 revs: PyObject,
980 target_density: f64,
980 target_density: f64,
981 min_gap_size: usize,
981 min_gap_size: usize,
982 ) -> PyResult<PyObject> {
982 ) -> PyResult<PyObject> {
983 let index = &mut *self.index(py).borrow_mut();
983 let index = &mut *self.index(py).borrow_mut();
984 let revs: Vec<_> = rev_pyiter_collect(py, &revs, index)?;
984 let revs: Vec<_> = rev_pyiter_collect(py, &revs, index)?;
985 let as_nested_vec =
985 let as_nested_vec =
986 index.slice_chunk_to_density(&revs, target_density, min_gap_size);
986 index.slice_chunk_to_density(&revs, target_density, min_gap_size);
987 let mut res = Vec::with_capacity(as_nested_vec.len());
987 let mut res = Vec::with_capacity(as_nested_vec.len());
988 let mut py_chunk = Vec::new();
988 let mut py_chunk = Vec::new();
989 for chunk in as_nested_vec {
989 for chunk in as_nested_vec {
990 py_chunk.clear();
990 py_chunk.clear();
991 py_chunk.reserve_exact(chunk.len());
991 py_chunk.reserve_exact(chunk.len());
992 for rev in chunk {
992 for rev in chunk {
993 py_chunk.push(
993 py_chunk.push(
994 PyRevision::from(rev).into_py_object(py).into_object(),
994 PyRevision::from(rev).into_py_object(py).into_object(),
995 );
995 );
996 }
996 }
997 res.push(PyList::new(py, &py_chunk).into_object());
997 res.push(PyList::new(py, &py_chunk).into_object());
998 }
998 }
999 // This is just to do the same as C, not sure why it does this
999 // This is just to do the same as C, not sure why it does this
1000 if res.len() == 1 {
1000 if res.len() == 1 {
1001 Ok(PyTuple::new(py, &res).into_object())
1001 Ok(PyTuple::new(py, &res).into_object())
1002 } else {
1002 } else {
1003 Ok(PyList::new(py, &res).into_object())
1003 Ok(PyList::new(py, &res).into_object())
1004 }
1004 }
1005 }
1005 }
1006
1006
    /// Bindings for `index.reachable_roots`.
    ///
    /// Heads are validated against the index up front; roots are kept as
    /// `UncheckedRevision` — presumably validated inside `reachable_roots`
    /// itself (NOTE(review): confirm against the core implementation).
    fn inner_reachableroots2(
        &self,
        py: Python,
        min_root: UncheckedRevision,
        heads: PyObject,
        roots: PyObject,
        include_path: bool,
    ) -> PyResult<PyObject> {
        let index = &*self.index(py).borrow();
        let heads = rev_pyiter_collect_or_else(py, &heads, index, |_rev| {
            PyErr::new::<IndexError, _>(py, "head out of range")
        })?;
        let roots: Result<_, _> = roots
            .iter(py)?
            .map(|r| {
                r.and_then(|o| match o.extract::<PyRevision>(py) {
                    Ok(r) => Ok(UncheckedRevision(r.0)),
                    Err(e) => Err(e),
                })
            })
            .collect();
        let as_set = index
            .reachable_roots(min_root, heads, roots?, include_path)
            .map_err(|e| graph_error(py, e))?;
        let as_vec: Vec<PyObject> = as_set
            .iter()
            .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
            .collect();
        Ok(PyList::new(py, &as_vec).into_object())
    }
1037 }
1037 }
1038
1038
1039 fn revlog_error(py: Python) -> PyErr {
1039 fn revlog_error(py: Python) -> PyErr {
1040 match py
1040 match py
1041 .import("mercurial.error")
1041 .import("mercurial.error")
1042 .and_then(|m| m.get(py, "RevlogError"))
1042 .and_then(|m| m.get(py, "RevlogError"))
1043 {
1043 {
1044 Err(e) => e,
1044 Err(e) => e,
1045 Ok(cls) => PyErr::from_instance(
1045 Ok(cls) => PyErr::from_instance(
1046 py,
1046 py,
1047 cls.call(py, (py.None(),), None).ok().into_py_object(py),
1047 cls.call(py, (py.None(),), None).ok().into_py_object(py),
1048 ),
1048 ),
1049 }
1049 }
1050 }
1050 }
1051
1051
1052 fn revlog_error_with_msg(py: Python, msg: &[u8]) -> PyErr {
1052 fn revlog_error_with_msg(py: Python, msg: &[u8]) -> PyErr {
1053 match py
1053 match py
1054 .import("mercurial.error")
1054 .import("mercurial.error")
1055 .and_then(|m| m.get(py, "RevlogError"))
1055 .and_then(|m| m.get(py, "RevlogError"))
1056 {
1056 {
1057 Err(e) => e,
1057 Err(e) => e,
1058 Ok(cls) => PyErr::from_instance(
1058 Ok(cls) => PyErr::from_instance(
1059 py,
1059 py,
1060 cls.call(py, (PyBytes::new(py, msg),), None)
1060 cls.call(py, (PyBytes::new(py, msg),), None)
1061 .ok()
1061 .ok()
1062 .into_py_object(py),
1062 .into_py_object(py),
1063 ),
1063 ),
1064 }
1064 }
1065 }
1065 }
1066
1066
/// Convert a [`hg::GraphError`] into the `ValueError` the C index raises.
fn graph_error(py: Python, _err: hg::GraphError) -> PyErr {
    // ParentOutOfRange is currently the only alternative
    // in `hg::GraphError`. The C index always raises this simple ValueError.
    PyErr::new::<ValueError, _>(py, "parent out of range")
}
1072
1072
1073 fn nodemap_rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1073 fn nodemap_rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1074 PyErr::new::<ValueError, _>(
1074 PyErr::new::<ValueError, _>(
1075 py,
1075 py,
1076 format!(
1076 format!(
1077 "Inconsistency: Revision {} found in nodemap \
1077 "Inconsistency: Revision {} found in nodemap \
1078 is not in revlog index",
1078 is not in revlog index",
1079 rev
1079 rev
1080 ),
1080 ),
1081 )
1081 )
1082 }
1082 }
1083
1083
1084 fn rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1084 fn rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1085 PyErr::new::<ValueError, _>(
1085 PyErr::new::<ValueError, _>(
1086 py,
1086 py,
1087 format!("revlog index out of range: {}", rev),
1087 format!("revlog index out of range: {}", rev),
1088 )
1088 )
1089 }
1089 }
1090
1090
1091 /// Standard treatment of NodeMapError
1091 /// Standard treatment of NodeMapError
1092 fn nodemap_error(py: Python, err: NodeMapError) -> PyErr {
1092 fn nodemap_error(py: Python, err: NodeMapError) -> PyErr {
1093 match err {
1093 match err {
1094 NodeMapError::MultipleResults => revlog_error(py),
1094 NodeMapError::MultipleResults => revlog_error(py),
1095 NodeMapError::RevisionNotInIndex(r) => nodemap_rev_not_in_index(py, r),
1095 NodeMapError::RevisionNotInIndex(r) => nodemap_rev_not_in_index(py, r),
1096 }
1096 }
1097 }
1097 }
1098
1098
/// assert two Python objects to be equal from a Python point of view
///
/// `method` is a label for the assertion error message, intended to be the
/// name of the caller.
/// `normalizer` is a function that takes a Python variable name and returns
/// an expression that the comparison will actually use.
/// For example: `|v| format!("sorted({})", v)`
fn assert_py_eq_normalized(
    py: Python,
    method: &str,
    rust: &PyObject,
    c: &PyObject,
    normalizer: impl FnOnce(&str) -> String + Copy,
) -> PyResult<()> {
    let locals = PyDict::new(py);
    locals.set_item(py, "rust".into_py_object(py).into_object(), rust)?;
    locals.set_item(py, "c".into_py_object(py).into_object(), c)?;
    // Evaluate the comparison in Python itself so equality semantics match
    // what calling test code would observe.
    let is_eq: PyBool = py
        .eval(
            &format!("{} == {}", &normalizer("rust"), &normalizer("c")),
            None,
            Some(&locals),
        )?
        .extract(py)?;
    assert!(
        is_eq.is_true(),
        "{} results differ. Rust: {:?} C: {:?} (before any normalization)",
        method,
        rust,
        c
    );
    Ok(())
}
1134
1134
1135 fn assert_py_eq(
1135 fn assert_py_eq(
1136 py: Python,
1136 py: Python,
1137 method: &str,
1137 method: &str,
1138 rust: &PyObject,
1138 rust: &PyObject,
1139 c: &PyObject,
1139 c: &PyObject,
1140 ) -> PyResult<()> {
1140 ) -> PyResult<()> {
1141 assert_py_eq_normalized(py, method, rust, c, |v| v.to_owned())
1141 assert_py_eq_normalized(py, method, rust, c, |v| v.to_owned())
1142 }
1142 }
1143
1143
1144 /// Create the module, with __package__ given from parent
1144 /// Create the module, with __package__ given from parent
1145 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
1145 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
1146 let dotted_name = &format!("{}.revlog", package);
1146 let dotted_name = &format!("{}.revlog", package);
1147 let m = PyModule::new(py, dotted_name)?;
1147 let m = PyModule::new(py, dotted_name)?;
1148 m.add(py, "__package__", package)?;
1148 m.add(py, "__package__", package)?;
1149 m.add(py, "__doc__", "RevLog - Rust implementations")?;
1149 m.add(py, "__doc__", "RevLog - Rust implementations")?;
1150
1150
1151 m.add_class::<MixedIndex>(py)?;
1151 m.add_class::<MixedIndex>(py)?;
1152
1152
1153 let sys = PyModule::import(py, "sys")?;
1153 let sys = PyModule::import(py, "sys")?;
1154 let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
1154 let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
1155 sys_modules.set_item(py, dotted_name, &m)?;
1155 sys_modules.set_item(py, dotted_name, &m)?;
1156
1156
1157 Ok(m)
1157 Ok(m)
1158 }
1158 }
General Comments 0
You need to be logged in to leave comments. Login now