rust-index: support cache clearing...
Raphaël Gomès
r52090:4e6620b7 default
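
The heart of this change, visible in the first hunk below: the inline-offsets cache moves behind an `RwLock<Option<Vec<usize>>>` so it can be dropped by `clear_caches()` and lazily recomputed by `inline_scan`, while a new `is_inline` flag records inlineness independently of whether the cache is populated. As an illustrative sketch only (the `LazyCache` type and all its names are invented here, not part of the patch), the populate-under-write-lock-then-read pattern looks like this:

use std::sync::{RwLock, RwLockReadGuard};

/// A lazily computed cache that can be dropped and rebuilt on demand,
/// mirroring the `offsets: RwLock<Option<Vec<usize>>>` field in the hunk.
struct LazyCache {
    values: RwLock<Option<Vec<usize>>>,
}

impl LazyCache {
    fn new() -> Self {
        Self { values: RwLock::new(None) }
    }

    /// Populate under the write lock if empty, then hand out a read guard.
    fn get(&self) -> RwLockReadGuard<'_, Option<Vec<usize>>> {
        {
            // Scope the write guard so it is released before taking the
            // read lock below.
            let mut guard = self.values.write().unwrap();
            if guard.is_none() {
                guard.replace((0..4).map(|i| i * 64).collect());
            }
        }
        self.values.read().unwrap()
    }

    /// Clearing is just resetting the lock's contents; the next `get`
    /// recomputes.
    fn clear(&mut self) {
        self.values = RwLock::new(None);
    }
}

fn main() {
    let mut cache = LazyCache::new();
    assert_eq!(cache.get().as_deref(), Some(&[0, 64, 128, 192][..]));
    cache.clear(); // cache is gone...
    // ...and transparently rebuilt on the next access.
    assert_eq!(cache.get().as_deref(), Some(&[0, 64, 128, 192][..]));
}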
@@ -1,809 +1,860 @@
use std::fmt::Debug;
use std::ops::Deref;
+use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};

use byteorder::{BigEndian, ByteOrder};
use bytes_cast::{unaligned, BytesCast};

use super::REVIDX_KNOWN_FLAGS;
use crate::errors::HgError;
use crate::node::{NODE_BYTES_LENGTH, STORED_NODE_ID_BYTES};
use crate::revlog::node::Node;
use crate::revlog::{Revision, NULL_REVISION};
use crate::{Graph, GraphError, RevlogError, RevlogIndex, UncheckedRevision};

pub const INDEX_ENTRY_SIZE: usize = 64;
pub const COMPRESSION_MODE_INLINE: u8 = 2;

pub struct IndexHeader {
    pub(super) header_bytes: [u8; 4],
}

#[derive(Copy, Clone)]
pub struct IndexHeaderFlags {
    flags: u16,
}

/// Corresponds to the high bits of `_format_flags` in python
impl IndexHeaderFlags {
    /// Corresponds to FLAG_INLINE_DATA in python
    pub fn is_inline(self) -> bool {
        self.flags & 1 != 0
    }
    /// Corresponds to FLAG_GENERALDELTA in python
    pub fn uses_generaldelta(self) -> bool {
        self.flags & 2 != 0
    }
}

/// Corresponds to the INDEX_HEADER structure,
/// which is parsed as a `header` variable in `_loadindex` in `revlog.py`
impl IndexHeader {
    fn format_flags(&self) -> IndexHeaderFlags {
        // No "unknown flags" check here, unlike in python. Maybe there should
        // be.
        IndexHeaderFlags {
            flags: BigEndian::read_u16(&self.header_bytes[0..2]),
        }
    }

    /// The only revlog version currently supported by rhg.
    const REVLOGV1: u16 = 1;

    /// Corresponds to `_format_version` in Python.
    fn format_version(&self) -> u16 {
        BigEndian::read_u16(&self.header_bytes[2..4])
    }

    pub fn parse(index_bytes: &[u8]) -> Result<Option<IndexHeader>, HgError> {
        if index_bytes.is_empty() {
            return Ok(None);
        }
        if index_bytes.len() < 4 {
            return Err(HgError::corrupted(
                "corrupted revlog: can't read the index format header",
            ));
        }
        Ok(Some(IndexHeader {
            header_bytes: {
                let bytes: [u8; 4] =
                    index_bytes[0..4].try_into().expect("impossible");
                bytes
            },
        }))
    }
}

/// Abstracts the access to the index bytes since they can be spread between
/// the immutable (bytes) part and the mutable (added) part if any appends
/// happened. This makes it transparent for the callers.
struct IndexData {
    /// Immutable bytes, most likely taken from disk
    bytes: Box<dyn Deref<Target = [u8]> + Send>,
    /// Used when stripping index contents, keeps track of the start of the
    /// first stripped revision, which is used to give a slice of the
    /// `bytes` field.
    truncation: Option<usize>,
    /// Bytes that were added after reading the index
    added: Vec<u8>,
}

impl IndexData {
    pub fn new(bytes: Box<dyn Deref<Target = [u8]> + Send>) -> Self {
        Self {
            bytes,
            truncation: None,
            added: vec![],
        }
    }

    pub fn len(&self) -> usize {
        match self.truncation {
            Some(truncation) => truncation + self.added.len(),
            None => self.bytes.len() + self.added.len(),
        }
    }

    fn remove(
        &mut self,
        rev: Revision,
        offsets: Option<&[usize]>,
    ) -> Result<(), RevlogError> {
        let rev = rev.0 as usize;
        let truncation = if let Some(offsets) = offsets {
            offsets[rev]
        } else {
            rev * INDEX_ENTRY_SIZE
        };
        if truncation < self.bytes.len() {
            self.truncation = Some(truncation);
            self.added.clear();
        } else {
            self.added.truncate(truncation - self.bytes.len());
        }
        Ok(())
    }
}

impl std::ops::Index<std::ops::Range<usize>> for IndexData {
    type Output = [u8];

    fn index(&self, index: std::ops::Range<usize>) -> &Self::Output {
        let start = index.start;
        let end = index.end;
        let immutable_len = match self.truncation {
            Some(truncation) => truncation,
            None => self.bytes.len(),
        };
        if start < immutable_len {
            if end > immutable_len {
                panic!("index data cannot span existing and added ranges");
            }
            &self.bytes[index]
        } else {
            &self.added[start - immutable_len..end - immutable_len]
        }
    }
}

pub struct RevisionDataParams {
    pub flags: u16,
    pub data_offset: u64,
    pub data_compressed_length: i32,
    pub data_uncompressed_length: i32,
    pub data_delta_base: i32,
    pub link_rev: i32,
    pub parent_rev_1: i32,
    pub parent_rev_2: i32,
    pub node_id: [u8; NODE_BYTES_LENGTH],
    pub _sidedata_offset: u64,
    pub _sidedata_compressed_length: i32,
    pub data_compression_mode: u8,
    pub _sidedata_compression_mode: u8,
    pub _rank: i32,
}

#[derive(BytesCast)]
#[repr(C)]
pub struct RevisionDataV1 {
    data_offset_or_flags: unaligned::U64Be,
    data_compressed_length: unaligned::I32Be,
    data_uncompressed_length: unaligned::I32Be,
    data_delta_base: unaligned::I32Be,
    link_rev: unaligned::I32Be,
    parent_rev_1: unaligned::I32Be,
    parent_rev_2: unaligned::I32Be,
    node_id: [u8; STORED_NODE_ID_BYTES],
}

fn _static_assert_size_of_revision_data_v1() {
    let _ = std::mem::transmute::<RevisionDataV1, [u8; 64]>;
}

impl RevisionDataParams {
    pub fn validate(&self) -> Result<(), RevlogError> {
        if self.flags & !REVIDX_KNOWN_FLAGS != 0 {
            return Err(RevlogError::corrupted(format!(
                "unknown revlog index flags: {}",
                self.flags
            )));
        }
        if self.data_compression_mode != COMPRESSION_MODE_INLINE {
            return Err(RevlogError::corrupted(format!(
                "invalid data compression mode: {}",
                self.data_compression_mode
            )));
        }
        // FIXME isn't this only for v2 or changelog v2?
        if self._sidedata_compression_mode != COMPRESSION_MODE_INLINE {
            return Err(RevlogError::corrupted(format!(
                "invalid sidedata compression mode: {}",
                self._sidedata_compression_mode
            )));
        }
        Ok(())
    }

    pub fn into_v1(self) -> RevisionDataV1 {
        let data_offset_or_flags = self.data_offset << 16 | self.flags as u64;
        let mut node_id = [0; STORED_NODE_ID_BYTES];
        node_id[..NODE_BYTES_LENGTH].copy_from_slice(&self.node_id);
        RevisionDataV1 {
            data_offset_or_flags: data_offset_or_flags.into(),
            data_compressed_length: self.data_compressed_length.into(),
            data_uncompressed_length: self.data_uncompressed_length.into(),
            data_delta_base: self.data_delta_base.into(),
            link_rev: self.link_rev.into(),
            parent_rev_1: self.parent_rev_1.into(),
            parent_rev_2: self.parent_rev_2.into(),
            node_id,
        }
    }
}

/// A Revlog index
pub struct Index {
    bytes: IndexData,
    /// Offsets of starts of index blocks.
    /// Only needed when the index is interleaved with data.
-    offsets: Option<Vec<usize>>,
+    offsets: RwLock<Option<Vec<usize>>>,
    uses_generaldelta: bool,
+    is_inline: bool,
}

impl Debug for Index {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Index")
            .field("offsets", &self.offsets)
            .field("uses_generaldelta", &self.uses_generaldelta)
            .finish()
    }
}

impl Graph for Index {
    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
        let err = || GraphError::ParentOutOfRange(rev);
        match self.get_entry(rev) {
            Some(entry) => {
                // The C implementation checks that the parents are valid
                // before returning
                Ok([
                    self.check_revision(entry.p1()).ok_or_else(err)?,
                    self.check_revision(entry.p2()).ok_or_else(err)?,
                ])
            }
            None => Ok([NULL_REVISION, NULL_REVISION]),
        }
    }
}

impl Index {
    /// Create an index from bytes.
    /// Calculate the start of each entry when is_inline is true.
    pub fn new(
        bytes: Box<dyn Deref<Target = [u8]> + Send>,
        default_header: IndexHeader,
    ) -> Result<Self, HgError> {
        let header =
            IndexHeader::parse(bytes.as_ref())?.unwrap_or(default_header);

        if header.format_version() != IndexHeader::REVLOGV1 {
            // A proper new version should have had a repo/store
            // requirement.
            return Err(HgError::corrupted("unsupported revlog version"));
        }

        // This is only correct because we know version is REVLOGV1.
        // In v2 we always use generaldelta, while in v0 we never use
        // generaldelta. Similar for [is_inline] (it's only used in v1).
        let uses_generaldelta = header.format_flags().uses_generaldelta();

        if header.format_flags().is_inline() {
            let mut offset: usize = 0;
            let mut offsets = Vec::new();

            while offset + INDEX_ENTRY_SIZE <= bytes.len() {
                offsets.push(offset);
                let end = offset + INDEX_ENTRY_SIZE;
                let entry = IndexEntry {
                    bytes: &bytes[offset..end],
                    offset_override: None,
                };

                offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
            }

            if offset == bytes.len() {
                Ok(Self {
                    bytes: IndexData::new(bytes),
-                    offsets: Some(offsets),
+                    offsets: RwLock::new(Some(offsets)),
                    uses_generaldelta,
+                    is_inline: true,
                })
            } else {
                Err(HgError::corrupted("unexpected inline revlog length"))
            }
        } else {
            Ok(Self {
                bytes: IndexData::new(bytes),
-                offsets: None,
+                offsets: RwLock::new(None),
                uses_generaldelta,
+                is_inline: false,
            })
        }
    }

    pub fn uses_generaldelta(&self) -> bool {
        self.uses_generaldelta
    }

    /// Value of the inline flag.
    pub fn is_inline(&self) -> bool {
-        self.offsets.is_some()
+        self.is_inline
    }

    /// Return a slice of bytes if `revlog` is inline. Panic if not.
    pub fn data(&self, start: usize, end: usize) -> &[u8] {
        if !self.is_inline() {
            panic!("tried to access data in the index of a revlog that is not inline");
        }
        &self.bytes[start..end]
    }

    /// Return number of entries of the revlog index.
    pub fn len(&self) -> usize {
-        if let Some(offsets) = &self.offsets {
+        if let Some(offsets) = &*self.get_offsets() {
            offsets.len()
        } else {
            self.bytes.len() / INDEX_ENTRY_SIZE
        }
    }

+    pub fn get_offsets(&self) -> RwLockReadGuard<Option<Vec<usize>>> {
+        if self.is_inline() {
+            {
+                // Wrap in a block to drop the read guard
+                // TODO perf?
+                let mut offsets = self.offsets.write().unwrap();
+                if offsets.is_none() {
+                    offsets.replace(inline_scan(&self.bytes.bytes).1);
+                }
+            }
+        }
+        self.offsets.read().unwrap()
+    }
+
+    pub fn get_offsets_mut(&mut self) -> RwLockWriteGuard<Option<Vec<usize>>> {
+        let mut offsets = self.offsets.write().unwrap();
+        if self.is_inline() && offsets.is_none() {
+            offsets.replace(inline_scan(&self.bytes.bytes).1);
+        }
+        offsets
+    }
+
    /// Returns `true` if the `Index` has zero `entries`.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Return the index entry corresponding to the given revision if it
    /// exists.
    pub fn get_entry(&self, rev: Revision) -> Option<IndexEntry> {
        if rev == NULL_REVISION {
            return None;
        }
-        Some(if let Some(offsets) = &self.offsets {
-            self.get_entry_inline(rev, offsets)
+        Some(if let Some(offsets) = &*self.get_offsets() {
+            self.get_entry_inline(rev, offsets.as_ref())
        } else {
            self.get_entry_separated(rev)
        })
    }

    fn get_entry_inline(
        &self,
        rev: Revision,
        offsets: &[usize],
    ) -> IndexEntry {
        let start = offsets[rev.0 as usize];
        let end = start + INDEX_ENTRY_SIZE;
        let bytes = &self.bytes[start..end];

        // See IndexEntry for an explanation of this override.
        let offset_override = Some(end);

        IndexEntry {
            bytes,
            offset_override,
        }
    }

    fn get_entry_separated(&self, rev: Revision) -> IndexEntry {
        let start = rev.0 as usize * INDEX_ENTRY_SIZE;
        let end = start + INDEX_ENTRY_SIZE;
        let bytes = &self.bytes[start..end];

        // Override the offset of the first revision as its bytes are used
        // for the index's metadata (saving space because it is always 0)
        let offset_override = if rev == Revision(0) { Some(0) } else { None };

        IndexEntry {
            bytes,
            offset_override,
        }
    }

    /// TODO move this to the trait probably, along with other things
    pub fn append(
        &mut self,
        revision_data: RevisionDataParams,
    ) -> Result<(), RevlogError> {
        revision_data.validate()?;
        let new_offset = self.bytes.len();
-        if let Some(offsets) = self.offsets.as_mut() {
+        if let Some(offsets) = &mut *self.get_offsets_mut() {
            offsets.push(new_offset)
        }
        self.bytes.added.extend(revision_data.into_v1().as_bytes());
        Ok(())
    }

    pub fn remove(&mut self, rev: Revision) -> Result<(), RevlogError> {
-        self.bytes.remove(rev, self.offsets.as_deref())?;
-        if let Some(offsets) = self.offsets.as_mut() {
+        let offsets = self.get_offsets().clone();
+        self.bytes.remove(rev, offsets.as_deref())?;
+        if let Some(offsets) = &mut *self.get_offsets_mut() {
            offsets.truncate(rev.0 as usize)
        }
        Ok(())
    }
+
+    pub fn clear_caches(&mut self) {
+        // We need to get the 'inline' value from Python at init and use this
+        // instead of offsets to determine whether we're inline since we might
+        // clear caches. This implies re-populating the offsets on-demand.
+        self.offsets = RwLock::new(None);
+    }
+}
+
+fn inline_scan(bytes: &[u8]) -> (usize, Vec<usize>) {
+    let mut offset: usize = 0;
+    let mut offsets = Vec::new();
+
+    while offset + INDEX_ENTRY_SIZE <= bytes.len() {
+        offsets.push(offset);
+        let end = offset + INDEX_ENTRY_SIZE;
+        let entry = IndexEntry {
+            bytes: &bytes[offset..end],
+            offset_override: None,
+        };
+
+        offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
+    }
+    (offset, offsets)
}

impl super::RevlogIndex for Index {
    fn len(&self) -> usize {
        self.len()
    }

    fn node(&self, rev: Revision) -> Option<&Node> {
        self.get_entry(rev).map(|entry| entry.hash())
    }
}

#[derive(Debug)]
pub struct IndexEntry<'a> {
    bytes: &'a [u8],
    /// Allows to override the offset value of the entry.
    ///
    /// For interleaved index and data, the offset stored in the index
    /// corresponds to the separated data offset.
    /// It has to be overridden with the actual offset in the interleaved
    /// index which is just after the index block.
    ///
    /// For separated index and data, the offset stored in the first index
    /// entry is mixed with the index headers.
    /// It has to be overridden with 0.
    offset_override: Option<usize>,
}

impl<'a> IndexEntry<'a> {
    /// Return the offset of the data.
    pub fn offset(&self) -> usize {
        if let Some(offset_override) = self.offset_override {
            offset_override
        } else {
            let mut bytes = [0; 8];
            bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
            BigEndian::read_u64(&bytes[..]) as usize
        }
    }

    pub fn flags(&self) -> u16 {
        BigEndian::read_u16(&self.bytes[6..=7])
    }

    /// Return the compressed length of the data.
    pub fn compressed_len(&self) -> u32 {
        BigEndian::read_u32(&self.bytes[8..=11])
    }

    /// Return the uncompressed length of the data.
    pub fn uncompressed_len(&self) -> i32 {
        BigEndian::read_i32(&self.bytes[12..=15])
    }

    /// Return the revision upon which the data has been derived.
    pub fn base_revision_or_base_of_delta_chain(&self) -> UncheckedRevision {
        // TODO Maybe return an Option when base_revision == rev?
        // Requires to add rev to IndexEntry

        BigEndian::read_i32(&self.bytes[16..]).into()
    }

    pub fn link_revision(&self) -> UncheckedRevision {
        BigEndian::read_i32(&self.bytes[20..]).into()
    }

    pub fn p1(&self) -> UncheckedRevision {
        BigEndian::read_i32(&self.bytes[24..]).into()
    }

    pub fn p2(&self) -> UncheckedRevision {
        BigEndian::read_i32(&self.bytes[28..]).into()
    }

    /// Return the hash of revision's full text.
    ///
    /// Currently, SHA-1 is used and only the first 20 bytes of this field
    /// are used.
    pub fn hash(&self) -> &'a Node {
        (&self.bytes[32..52]).try_into().unwrap()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::node::NULL_NODE;

    #[cfg(test)]
    #[derive(Debug, Copy, Clone)]
    pub struct IndexEntryBuilder {
        is_first: bool,
        is_inline: bool,
        is_general_delta: bool,
        version: u16,
        offset: usize,
        compressed_len: usize,
        uncompressed_len: usize,
        base_revision_or_base_of_delta_chain: Revision,
        link_revision: Revision,
        p1: Revision,
        p2: Revision,
        node: Node,
    }

    #[cfg(test)]
    impl IndexEntryBuilder {
        #[allow(clippy::new_without_default)]
        pub fn new() -> Self {
            Self {
                is_first: false,
                is_inline: false,
                is_general_delta: true,
                version: 1,
                offset: 0,
                compressed_len: 0,
                uncompressed_len: 0,
                base_revision_or_base_of_delta_chain: Revision(0),
                link_revision: Revision(0),
                p1: NULL_REVISION,
                p2: NULL_REVISION,
                node: NULL_NODE,
            }
        }

        pub fn is_first(&mut self, value: bool) -> &mut Self {
            self.is_first = value;
            self
        }

        pub fn with_inline(&mut self, value: bool) -> &mut Self {
            self.is_inline = value;
            self
        }

        pub fn with_general_delta(&mut self, value: bool) -> &mut Self {
            self.is_general_delta = value;
            self
        }

        pub fn with_version(&mut self, value: u16) -> &mut Self {
            self.version = value;
            self
        }

        pub fn with_offset(&mut self, value: usize) -> &mut Self {
            self.offset = value;
            self
        }

        pub fn with_compressed_len(&mut self, value: usize) -> &mut Self {
            self.compressed_len = value;
            self
        }

        pub fn with_uncompressed_len(&mut self, value: usize) -> &mut Self {
            self.uncompressed_len = value;
            self
        }

        pub fn with_base_revision_or_base_of_delta_chain(
            &mut self,
            value: Revision,
        ) -> &mut Self {
            self.base_revision_or_base_of_delta_chain = value;
            self
        }

        pub fn with_link_revision(&mut self, value: Revision) -> &mut Self {
            self.link_revision = value;
            self
        }

        pub fn with_p1(&mut self, value: Revision) -> &mut Self {
            self.p1 = value;
            self
        }

        pub fn with_p2(&mut self, value: Revision) -> &mut Self {
            self.p2 = value;
            self
        }

        pub fn with_node(&mut self, value: Node) -> &mut Self {
            self.node = value;
            self
        }

        pub fn build(&self) -> Vec<u8> {
            let mut bytes = Vec::with_capacity(INDEX_ENTRY_SIZE);
            if self.is_first {
                bytes.extend(&match (self.is_general_delta, self.is_inline) {
                    (false, false) => [0u8, 0],
                    (false, true) => [0u8, 1],
                    (true, false) => [0u8, 2],
                    (true, true) => [0u8, 3],
                });
                bytes.extend(&self.version.to_be_bytes());
                // Remaining offset bytes.
                bytes.extend(&[0u8; 2]);
            } else {
                // Offset stored on 48 bits (6 bytes)
                bytes.extend(&(self.offset as u64).to_be_bytes()[2..]);
            }
            bytes.extend(&[0u8; 2]); // Revision flags.
            bytes.extend(&(self.compressed_len as u32).to_be_bytes());
            bytes.extend(&(self.uncompressed_len as u32).to_be_bytes());
            bytes.extend(
                &self.base_revision_or_base_of_delta_chain.0.to_be_bytes(),
            );
            bytes.extend(&self.link_revision.0.to_be_bytes());
            bytes.extend(&self.p1.0.to_be_bytes());
            bytes.extend(&self.p2.0.to_be_bytes());
            bytes.extend(self.node.as_bytes());
            bytes.extend(vec![0u8; 12]);
            bytes
        }
    }

    pub fn is_inline(index_bytes: &[u8]) -> bool {
        IndexHeader::parse(index_bytes)
            .expect("too short")
            .unwrap()
            .format_flags()
            .is_inline()
    }

    pub fn uses_generaldelta(index_bytes: &[u8]) -> bool {
        IndexHeader::parse(index_bytes)
            .expect("too short")
            .unwrap()
            .format_flags()
            .uses_generaldelta()
    }

    pub fn get_version(index_bytes: &[u8]) -> u16 {
        IndexHeader::parse(index_bytes)
            .expect("too short")
            .unwrap()
            .format_version()
    }

    #[test]
    fn flags_when_no_inline_flag_test() {
        let bytes = IndexEntryBuilder::new()
            .is_first(true)
            .with_general_delta(false)
            .with_inline(false)
            .build();

        assert!(!is_inline(&bytes));
        assert!(!uses_generaldelta(&bytes));
    }

    #[test]
    fn flags_when_inline_flag_test() {
        let bytes = IndexEntryBuilder::new()
            .is_first(true)
            .with_general_delta(false)
            .with_inline(true)
            .build();

        assert!(is_inline(&bytes));
        assert!(!uses_generaldelta(&bytes));
    }

    #[test]
    fn flags_when_inline_and_generaldelta_flags_test() {
        let bytes = IndexEntryBuilder::new()
            .is_first(true)
            .with_general_delta(true)
            .with_inline(true)
            .build();

        assert!(is_inline(&bytes));
        assert!(uses_generaldelta(&bytes));
    }

    #[test]
    fn test_offset() {
        let bytes = IndexEntryBuilder::new().with_offset(1).build();
        let entry = IndexEntry {
            bytes: &bytes,
            offset_override: None,
        };

        assert_eq!(entry.offset(), 1)
    }

    #[test]
    fn test_with_overridden_offset() {
        let bytes = IndexEntryBuilder::new().with_offset(1).build();
        let entry = IndexEntry {
            bytes: &bytes,
            offset_override: Some(2),
        };

        assert_eq!(entry.offset(), 2)
    }

    #[test]
    fn test_compressed_len() {
        let bytes = IndexEntryBuilder::new().with_compressed_len(1).build();
        let entry = IndexEntry {
            bytes: &bytes,
            offset_override: None,
        };

        assert_eq!(entry.compressed_len(), 1)
    }

    #[test]
    fn test_uncompressed_len() {
        let bytes = IndexEntryBuilder::new().with_uncompressed_len(1).build();
        let entry = IndexEntry {
            bytes: &bytes,
            offset_override: None,
        };

        assert_eq!(entry.uncompressed_len(), 1)
    }

    #[test]
    fn test_base_revision_or_base_of_delta_chain() {
        let bytes = IndexEntryBuilder::new()
            .with_base_revision_or_base_of_delta_chain(Revision(1))
            .build();
        let entry = IndexEntry {
            bytes: &bytes,
            offset_override: None,
        };

        assert_eq!(entry.base_revision_or_base_of_delta_chain(), 1.into())
    }

    #[test]
    fn link_revision_test() {
        let bytes = IndexEntryBuilder::new()
            .with_link_revision(Revision(123))
            .build();

        let entry = IndexEntry {
            bytes: &bytes,
            offset_override: None,
        };

        assert_eq!(entry.link_revision(), 123.into());
    }

    #[test]
    fn p1_test() {
        let bytes = IndexEntryBuilder::new().with_p1(Revision(123)).build();

        let entry = IndexEntry {
            bytes: &bytes,
            offset_override: None,
        };

        assert_eq!(entry.p1(), 123.into());
    }

    #[test]
    fn p2_test() {
        let bytes = IndexEntryBuilder::new().with_p2(Revision(123)).build();

        let entry = IndexEntry {
            bytes: &bytes,
            offset_override: None,
        };

        assert_eq!(entry.p2(), 123.into());
    }

    #[test]
    fn node_test() {
        let node = Node::from_hex("0123456789012345678901234567890123456789")
            .unwrap();
        let bytes = IndexEntryBuilder::new().with_node(node).build();

        let entry = IndexEntry {
            bytes: &bytes,
            offset_override: None,
        };

        assert_eq!(*entry.hash(), node);
    }

    #[test]
    fn version_test() {
        let bytes = IndexEntryBuilder::new()
            .is_first(true)
            .with_version(2)
            .build();

        assert_eq!(get_version(&bytes), 2)
    }
}

#[cfg(test)]
pub use tests::IndexEntryBuilder;
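
The second hunk below is the `hg-cpython` binding side: `MixedIndex::clearcaches` now resets the Rust index's caches in addition to its own py_class slots, before delegating to the C index. A minimal standalone sketch of that fan-out shape (the `Wrapper`/`Inner` names are hypothetical stand-ins, not from the patch):

// Sketch only: a wrapper clears its own cached slots, then asks the inner
// Rust index to drop its lazily computed state, mirroring `clearcaches`.
struct Inner {
    offsets: Option<Vec<usize>>,
}

impl Inner {
    fn clear_caches(&mut self) {
        // Drop the lazily computed state; it is rebuilt on next access.
        self.offsets = None;
    }
}

struct Wrapper {
    nodemap: Option<Vec<u8>>, // stand-in for the nodetree/docket/mmap slots
    inner: Inner,
}

impl Wrapper {
    fn clearcaches(&mut self) {
        self.nodemap.take(); // clear the wrapper-level caches first
        self.inner.clear_caches(); // then the inner index's caches
    }
}

fn main() {
    let mut w = Wrapper {
        nodemap: Some(vec![1, 2, 3]),
        inner: Inner { offsets: Some(vec![0, 64]) },
    };
    w.clearcaches();
    assert!(w.nodemap.is_none() && w.inner.offsets.is_none());
}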
@@ -1,633 +1,634 @@
1 // revlog.rs
1 // revlog.rs
2 //
2 //
3 // Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
3 // Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::{
8 use crate::{
9 cindex,
9 cindex,
10 utils::{node_from_py_bytes, node_from_py_object},
10 utils::{node_from_py_bytes, node_from_py_object},
11 PyRevision,
11 PyRevision,
12 };
12 };
13 use cpython::{
13 use cpython::{
14 buffer::{Element, PyBuffer},
14 buffer::{Element, PyBuffer},
15 exc::{IndexError, ValueError},
15 exc::{IndexError, ValueError},
16 ObjectProtocol, PyBytes, PyClone, PyDict, PyErr, PyInt, PyModule,
16 ObjectProtocol, PyBytes, PyClone, PyDict, PyErr, PyInt, PyModule,
17 PyObject, PyResult, PyString, PyTuple, Python, PythonObject, ToPyObject,
17 PyObject, PyResult, PyString, PyTuple, Python, PythonObject, ToPyObject,
18 };
18 };
19 use hg::{
19 use hg::{
20 index::IndexHeader,
20 index::IndexHeader,
21 index::{RevisionDataParams, COMPRESSION_MODE_INLINE},
21 index::{RevisionDataParams, COMPRESSION_MODE_INLINE},
22 nodemap::{Block, NodeMapError, NodeTree},
22 nodemap::{Block, NodeMapError, NodeTree},
23 revlog::{nodemap::NodeMap, NodePrefix, RevlogIndex},
23 revlog::{nodemap::NodeMap, NodePrefix, RevlogIndex},
24 BaseRevision, Revision, UncheckedRevision,
24 BaseRevision, Revision, UncheckedRevision,
25 };
25 };
26 use std::cell::RefCell;
26 use std::cell::RefCell;
27
27
28 /// Return a Struct implementing the Graph trait
28 /// Return a Struct implementing the Graph trait
29 pub(crate) fn pyindex_to_graph(
29 pub(crate) fn pyindex_to_graph(
30 py: Python,
30 py: Python,
31 index: PyObject,
31 index: PyObject,
32 ) -> PyResult<cindex::Index> {
32 ) -> PyResult<cindex::Index> {
33 match index.extract::<MixedIndex>(py) {
33 match index.extract::<MixedIndex>(py) {
34 Ok(midx) => Ok(midx.clone_cindex(py)),
34 Ok(midx) => Ok(midx.clone_cindex(py)),
35 Err(_) => cindex::Index::new(py, index),
35 Err(_) => cindex::Index::new(py, index),
36 }
36 }
37 }
37 }
38
38
39 py_class!(pub class MixedIndex |py| {
39 py_class!(pub class MixedIndex |py| {
40 data cindex: RefCell<cindex::Index>;
40 data cindex: RefCell<cindex::Index>;
41 data index: RefCell<hg::index::Index>;
41 data index: RefCell<hg::index::Index>;
42 data nt: RefCell<Option<NodeTree>>;
42 data nt: RefCell<Option<NodeTree>>;
43 data docket: RefCell<Option<PyObject>>;
43 data docket: RefCell<Option<PyObject>>;
44 // Holds a reference to the mmap'ed persistent nodemap data
44 // Holds a reference to the mmap'ed persistent nodemap data
45 data nodemap_mmap: RefCell<Option<PyBuffer>>;
45 data nodemap_mmap: RefCell<Option<PyBuffer>>;
46 // Holds a reference to the mmap'ed persistent index data
46 // Holds a reference to the mmap'ed persistent index data
47 data index_mmap: RefCell<Option<PyBuffer>>;
47 data index_mmap: RefCell<Option<PyBuffer>>;
48
48
49 def __new__(
49 def __new__(
50 _cls,
50 _cls,
51 cindex: PyObject,
51 cindex: PyObject,
52 data: PyObject,
52 data: PyObject,
53 default_header: u32,
53 default_header: u32,
54 ) -> PyResult<MixedIndex> {
54 ) -> PyResult<MixedIndex> {
55 Self::new(py, cindex, data, default_header)
55 Self::new(py, cindex, data, default_header)
56 }
56 }
57
57
58 /// Compatibility layer used for Python consumers needing access to the C index
58 /// Compatibility layer used for Python consumers needing access to the C index
59 ///
59 ///
60 /// Only use case so far is `scmutil.shortesthexnodeidprefix`,
60 /// Only use case so far is `scmutil.shortesthexnodeidprefix`,
61 /// that may need to build a custom `nodetree`, based on a specified revset.
61 /// that may need to build a custom `nodetree`, based on a specified revset.
62 /// With a Rust implementation of the nodemap, we will be able to get rid of
62 /// With a Rust implementation of the nodemap, we will be able to get rid of
63 /// this, by exposing our own standalone nodemap class,
63 /// this, by exposing our own standalone nodemap class,
64 /// ready to accept `MixedIndex`.
64 /// ready to accept `MixedIndex`.
65 def get_cindex(&self) -> PyResult<PyObject> {
65 def get_cindex(&self) -> PyResult<PyObject> {
66 Ok(self.cindex(py).borrow().inner().clone_ref(py))
66 Ok(self.cindex(py).borrow().inner().clone_ref(py))
67 }
67 }
68
68
69 // Index API involving nodemap, as defined in mercurial/pure/parsers.py
69 // Index API involving nodemap, as defined in mercurial/pure/parsers.py
70
70
71 /// Return Revision if found, raises a bare `error.RevlogError`
71 /// Return Revision if found, raises a bare `error.RevlogError`
72 /// in case of ambiguity, same as C version does
72 /// in case of ambiguity, same as C version does
73 def get_rev(&self, node: PyBytes) -> PyResult<Option<PyRevision>> {
73 def get_rev(&self, node: PyBytes) -> PyResult<Option<PyRevision>> {
74 let opt = self.get_nodetree(py)?.borrow();
74 let opt = self.get_nodetree(py)?.borrow();
75 let nt = opt.as_ref().unwrap();
75 let nt = opt.as_ref().unwrap();
76 let idx = &*self.cindex(py).borrow();
76 let idx = &*self.cindex(py).borrow();
77 let ridx = &*self.index(py).borrow();
77 let ridx = &*self.index(py).borrow();
78 let node = node_from_py_bytes(py, &node)?;
78 let node = node_from_py_bytes(py, &node)?;
79 let rust_rev =
79 let rust_rev =
80 nt.find_bin(ridx, node.into()).map_err(|e| nodemap_error(py, e))?;
80 nt.find_bin(ridx, node.into()).map_err(|e| nodemap_error(py, e))?;
81 let c_rev =
81 let c_rev =
82 nt.find_bin(idx, node.into()).map_err(|e| nodemap_error(py, e))?;
82 nt.find_bin(idx, node.into()).map_err(|e| nodemap_error(py, e))?;
83 assert_eq!(rust_rev, c_rev);
83 assert_eq!(rust_rev, c_rev);
84 Ok(rust_rev.map(Into::into))
84 Ok(rust_rev.map(Into::into))
85
85
86 }
86 }
87
87
88 /// same as `get_rev()` but raises a bare `error.RevlogError` if node
88 /// same as `get_rev()` but raises a bare `error.RevlogError` if node
89 /// is not found.
89 /// is not found.
90 ///
90 ///
91 /// No need to repeat `node` in the exception, `mercurial/revlog.py`
91 /// No need to repeat `node` in the exception, `mercurial/revlog.py`
92 /// will catch and rewrap with it
92 /// will catch and rewrap with it
93 def rev(&self, node: PyBytes) -> PyResult<PyRevision> {
93 def rev(&self, node: PyBytes) -> PyResult<PyRevision> {
94 self.get_rev(py, node)?.ok_or_else(|| revlog_error(py))
94 self.get_rev(py, node)?.ok_or_else(|| revlog_error(py))
95 }
95 }
96
96
97 /// return True if the node exist in the index
97 /// return True if the node exist in the index
98 def has_node(&self, node: PyBytes) -> PyResult<bool> {
98 def has_node(&self, node: PyBytes) -> PyResult<bool> {
99 self.get_rev(py, node).map(|opt| opt.is_some())
99 self.get_rev(py, node).map(|opt| opt.is_some())
100 }
100 }
101
101
102 /// find length of shortest hex nodeid of a binary ID
102 /// find length of shortest hex nodeid of a binary ID
103 def shortest(&self, node: PyBytes) -> PyResult<usize> {
103 def shortest(&self, node: PyBytes) -> PyResult<usize> {
104 let opt = self.get_nodetree(py)?.borrow();
104 let opt = self.get_nodetree(py)?.borrow();
105 let nt = opt.as_ref().unwrap();
105 let nt = opt.as_ref().unwrap();
106 let idx = &*self.cindex(py).borrow();
106 let idx = &*self.cindex(py).borrow();
107 match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
107 match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
108 {
108 {
109 Ok(Some(l)) => Ok(l),
109 Ok(Some(l)) => Ok(l),
110 Ok(None) => Err(revlog_error(py)),
110 Ok(None) => Err(revlog_error(py)),
111 Err(e) => Err(nodemap_error(py, e)),
111 Err(e) => Err(nodemap_error(py, e)),
112 }
112 }
113 }
113 }
114
114
115 def partialmatch(&self, node: PyObject) -> PyResult<Option<PyBytes>> {
115 def partialmatch(&self, node: PyObject) -> PyResult<Option<PyBytes>> {
116 let opt = self.get_nodetree(py)?.borrow();
116 let opt = self.get_nodetree(py)?.borrow();
117 let nt = opt.as_ref().unwrap();
117 let nt = opt.as_ref().unwrap();
118 let idx = &*self.cindex(py).borrow();
118 let idx = &*self.cindex(py).borrow();
119
119
120 let node_as_string = if cfg!(feature = "python3-sys") {
120 let node_as_string = if cfg!(feature = "python3-sys") {
121 node.cast_as::<PyString>(py)?.to_string(py)?.to_string()
121 node.cast_as::<PyString>(py)?.to_string(py)?.to_string()
122 }
122 }
123 else {
123 else {
124 let node = node.extract::<PyBytes>(py)?;
124 let node = node.extract::<PyBytes>(py)?;
125 String::from_utf8_lossy(node.data(py)).to_string()
125 String::from_utf8_lossy(node.data(py)).to_string()
126 };
126 };
127
127
128 let prefix = NodePrefix::from_hex(&node_as_string)
128 let prefix = NodePrefix::from_hex(&node_as_string)
129 .map_err(|_| PyErr::new::<ValueError, _>(
129 .map_err(|_| PyErr::new::<ValueError, _>(
130 py, format!("Invalid node or prefix '{}'", node_as_string))
130 py, format!("Invalid node or prefix '{}'", node_as_string))
131 )?;
131 )?;
132
132
133 nt.find_bin(idx, prefix)
133 nt.find_bin(idx, prefix)
134 // TODO make an inner API returning the node directly
134 // TODO make an inner API returning the node directly
135 .map(|opt| opt.map(
135 .map(|opt| opt.map(
136 |rev| PyBytes::new(py, idx.node(rev).unwrap().as_bytes())))
136 |rev| PyBytes::new(py, idx.node(rev).unwrap().as_bytes())))
137 .map_err(|e| nodemap_error(py, e))
137 .map_err(|e| nodemap_error(py, e))
138
138
139 }
139 }
140
140
141 /// append an index entry
141 /// append an index entry
142 def append(&self, tup: PyTuple) -> PyResult<PyObject> {
142 def append(&self, tup: PyTuple) -> PyResult<PyObject> {
143 if tup.len(py) < 8 {
143 if tup.len(py) < 8 {
144 // this is better than the panic promised by tup.get_item()
144 // this is better than the panic promised by tup.get_item()
145 return Err(
145 return Err(
146 PyErr::new::<IndexError, _>(py, "tuple index out of range"))
146 PyErr::new::<IndexError, _>(py, "tuple index out of range"))
147 }
147 }
148 let node_bytes = tup.get_item(py, 7).extract(py)?;
148 let node_bytes = tup.get_item(py, 7).extract(py)?;
149 let node = node_from_py_object(py, &node_bytes)?;
149 let node = node_from_py_object(py, &node_bytes)?;
150
150
151 let rev = self.len(py)? as BaseRevision;
151 let rev = self.len(py)? as BaseRevision;
152 let mut idx = self.cindex(py).borrow_mut();
152 let mut idx = self.cindex(py).borrow_mut();
153
153
154 // This is ok since we will just add the revision to the index
154 // This is ok since we will just add the revision to the index
155 let rev = Revision(rev);
155 let rev = Revision(rev);
156 idx.append(py, tup.clone_ref(py))?;
156 idx.append(py, tup.clone_ref(py))?;
157 self.index(py)
157 self.index(py)
158 .borrow_mut()
158 .borrow_mut()
159 .append(py_tuple_to_revision_data_params(py, tup)?)
159 .append(py_tuple_to_revision_data_params(py, tup)?)
160 .unwrap();
160 .unwrap();
161 self.get_nodetree(py)?.borrow_mut().as_mut().unwrap()
161 self.get_nodetree(py)?.borrow_mut().as_mut().unwrap()
162 .insert(&*idx, &node, rev)
162 .insert(&*idx, &node, rev)
163 .map_err(|e| nodemap_error(py, e))?;
163 .map_err(|e| nodemap_error(py, e))?;
164 Ok(py.None())
164 Ok(py.None())
165 }
165 }

    def __delitem__(&self, key: PyObject) -> PyResult<()> {
        // __delitem__ is both for `del idx[r]` and `del idx[r1:r2]`
        self.cindex(py).borrow().inner().del_item(py, &key)?;
        let start = key.getattr(py, "start")?;
        let start = UncheckedRevision(start.extract(py)?);
        let start = self.index(py)
            .borrow()
            .check_revision(start)
            .ok_or_else(|| {
                nodemap_error(py, NodeMapError::RevisionNotInIndex(start))
            })?;
        self.index(py).borrow_mut().remove(start).unwrap();
        let mut opt = self.get_nodetree(py)?.borrow_mut();
        let nt = opt.as_mut().unwrap();
        nt.invalidate_all();
        self.fill_nodemap(py, nt)?;
        Ok(())
    }
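    // Deletion never removes individual entries from the nodetree:
    // `invalidate_all` drops every in-memory block, then `fill_nodemap`
    // rebuilds the tree from the truncated index. The Rust path reads
    // `key.start`, so it effectively expects a slice (revlog strips are
    // expressed as `del index[start:]`); a bare integer key would fail
    // the `getattr` above.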

    //
    // Reforwarded C index API
    //

    // index_methods (tp_methods). Same ordering as in revlog.c

    /// return the gca set of the given revs
    def ancestors(&self, *args, **kw) -> PyResult<PyObject> {
        self.call_cindex(py, "ancestors", args, kw)
    }

    /// return the heads of the common ancestors of the given revs
    def commonancestorsheads(&self, *args, **kw) -> PyResult<PyObject> {
        self.call_cindex(py, "commonancestorsheads", args, kw)
    }

    /// Clear the index caches and inner py_class data.
    /// It is Python's responsibility to call `update_nodemap_data` again.
    def clearcaches(&self, *args, **kw) -> PyResult<PyObject> {
        self.nt(py).borrow_mut().take();
        self.docket(py).borrow_mut().take();
        self.nodemap_mmap(py).borrow_mut().take();
        self.index(py).borrow_mut().clear_caches();
        self.call_cindex(py, "clearcaches", args, kw)
    }
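    // Dropping the nodetree, docket and nodemap mmap resets the py_class
    // state, while `clear_caches` resets whatever the Rust index caches
    // internally, so that both the C and Rust indexes start cold again.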

    /// return the raw binary string representing a revision
    def entry_binary(&self, *args, **kw) -> PyResult<PyObject> {
        self.call_cindex(py, "entry_binary", args, kw)
    }

    /// return a binary packed version of the header
    def pack_header(&self, *args, **kw) -> PyResult<PyObject> {
        self.call_cindex(py, "pack_header", args, kw)
    }

    /// get an index entry
    def get(&self, *args, **kw) -> PyResult<PyObject> {
        self.call_cindex(py, "get", args, kw)
    }

    /// compute phases
    def computephasesmapsets(&self, *args, **kw) -> PyResult<PyObject> {
        self.call_cindex(py, "computephasesmapsets", args, kw)
    }

    /// reachableroots
    def reachableroots2(&self, *args, **kw) -> PyResult<PyObject> {
        self.call_cindex(py, "reachableroots2", args, kw)
    }

    /// get head revisions
    def headrevs(&self, *args, **kw) -> PyResult<PyObject> {
        self.call_cindex(py, "headrevs", args, kw)
    }

    /// get filtered head revisions
    def headrevsfiltered(&self, *args, **kw) -> PyResult<PyObject> {
        self.call_cindex(py, "headrevsfiltered", args, kw)
    }

    /// True if the object is a snapshot
    def issnapshot(&self, *args, **kw) -> PyResult<PyObject> {
        self.call_cindex(py, "issnapshot", args, kw)
    }

    /// Gather snapshot data in a cache dict
    def findsnapshots(&self, *args, **kw) -> PyResult<PyObject> {
        self.call_cindex(py, "findsnapshots", args, kw)
    }

    /// determine revisions with deltas to reconstruct fulltext
    def deltachain(&self, *args, **kw) -> PyResult<PyObject> {
        self.call_cindex(py, "deltachain", args, kw)
    }

    /// slice planned chunk read to reach a density threshold
    def slicechunktodensity(&self, *args, **kw) -> PyResult<PyObject> {
        self.call_cindex(py, "slicechunktodensity", args, kw)
    }

    /// stats for the index
    def stats(&self, *args, **kw) -> PyResult<PyObject> {
        self.call_cindex(py, "stats", args, kw)
    }

    // index_sequence_methods and index_mapping_methods.
    //
    // Since we call back through the high level Python API,
    // there's no point making a distinction between index_get
    // and index_getitem.

    def __len__(&self) -> PyResult<usize> {
        self.len(py)
    }

    def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
        // this conversion seems needless, but that's actually because
        // `index_getitem` does not handle conversion from PyLong,
        // which expressions such as [e for e in index] internally use.
        // Note that we don't seem to have a direct way to call
        // PySequence_GetItem (does the job), which would possibly be better
        // for performance
        let key = match key.extract::<i32>(py) {
            Ok(rev) => rev.to_py_object(py).into_object(),
            Err(_) => key,
        };
        self.cindex(py).borrow().inner().get_item(py, key)
    }

    def __contains__(&self, item: PyObject) -> PyResult<bool> {
        // ObjectProtocol does not seem to provide contains(), so
        // this is an equivalent implementation of the index_contains()
        // defined in revlog.c
        let cindex = self.cindex(py).borrow();
        match item.extract::<i32>(py) {
            Ok(rev) => {
                Ok(rev >= -1 && rev < self.len(py)? as BaseRevision)
            }
            Err(_) => {
                cindex.inner().call_method(
                    py,
                    "has_node",
                    PyTuple::new(py, &[item]),
                    None)?
                .extract(py)
            }
        }
    }
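    // `rev >= -1` accounts for NULL_REVISION (-1), which every index is
    // considered to contain; values that do not parse as an i32 are
    // treated as node ids and delegated to the C index's `has_node`.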

    def nodemap_data_all(&self) -> PyResult<PyBytes> {
        self.inner_nodemap_data_all(py)
    }

    def nodemap_data_incremental(&self) -> PyResult<PyObject> {
        self.inner_nodemap_data_incremental(py)
    }

    def update_nodemap_data(
        &self,
        docket: PyObject,
        nm_data: PyObject
    ) -> PyResult<PyObject> {
        self.inner_update_nodemap_data(py, docket, nm_data)
    }

    @property
    def entry_size(&self) -> PyResult<PyInt> {
        self.cindex(py).borrow().inner().getattr(py, "entry_size")?.extract::<PyInt>(py)
    }

    @property
    def rust_ext_compat(&self) -> PyResult<PyInt> {
        self.cindex(py).borrow().inner().getattr(py, "rust_ext_compat")?.extract::<PyInt>(py)
    }

});

/// Take a (potentially) mmap'ed buffer, and return the underlying Python
/// buffer along with the Rust slice into said buffer. We need to keep the
/// Python buffer around, otherwise we'd get a dangling pointer once the buffer
/// is freed from Python's side.
///
/// # Safety
///
/// The caller must make sure that the buffer is kept around for at least as
/// long as the slice.
#[deny(unsafe_op_in_unsafe_fn)]
unsafe fn mmap_keeparound(
    py: Python,
    data: PyObject,
) -> PyResult<(
    PyBuffer,
    Box<dyn std::ops::Deref<Target = [u8]> + Send + 'static>,
)> {
    let buf = PyBuffer::get(py, &data)?;
    let len = buf.item_count();

    // Build a slice from the mmap'ed buffer data
    let cbuf = buf.buf_ptr();
    let bytes = if std::mem::size_of::<u8>() == buf.item_size()
        && buf.is_c_contiguous()
        && u8::is_compatible_format(buf.format())
    {
        unsafe { std::slice::from_raw_parts(cbuf as *const u8, len) }
    } else {
        return Err(PyErr::new::<ValueError, _>(
            py,
            "Nodemap data buffer has an invalid memory representation"
                .to_string(),
        ));
    };

    Ok((buf, Box::new(bytes)))
}
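// The three checks above are what make the `from_raw_parts` cast sound:
// one-byte items, a C-contiguous layout and a u8-compatible format
// together guarantee the Python buffer really is a plain `&[u8]`.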

fn py_tuple_to_revision_data_params(
    py: Python,
    tuple: PyTuple,
) -> PyResult<RevisionDataParams> {
    if tuple.len(py) < 8 {
        // this is better than the panic promised by tup.get_item()
        return Err(PyErr::new::<IndexError, _>(
            py,
            "tuple index out of range",
        ));
    }
    let offset_or_flags: u64 = tuple.get_item(py, 0).extract(py)?;
    let node_id = tuple
        .get_item(py, 7)
        .extract::<PyBytes>(py)?
        .data(py)
        .try_into()
        .unwrap();
    let flags = (offset_or_flags & 0xFFFF) as u16;
    let data_offset = offset_or_flags >> 16;
    Ok(RevisionDataParams {
        flags,
        data_offset,
        data_compressed_length: tuple.get_item(py, 1).extract(py)?,
        data_uncompressed_length: tuple.get_item(py, 2).extract(py)?,
        data_delta_base: tuple.get_item(py, 3).extract(py)?,
        link_rev: tuple.get_item(py, 4).extract(py)?,
        parent_rev_1: tuple.get_item(py, 5).extract(py)?,
        parent_rev_2: tuple.get_item(py, 6).extract(py)?,
        node_id,
        _sidedata_offset: 0,
        _sidedata_compressed_length: 0,
        data_compression_mode: COMPRESSION_MODE_INLINE,
        _sidedata_compression_mode: COMPRESSION_MODE_INLINE,
        _rank: -1,
    })
}
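// A worked example of the bit-packing above, with hypothetical values:
// for offset_or_flags = 0x0000_0001_0000_0002,
//
//     flags       = 0x0002          (low 16 bits)
//     data_offset = 0x1_0000        (offset_or_flags >> 16)
//
// i.e. the data offset lives in the high 48 bits and the revision flags
// in the low 16, matching the revlog v1 entry encoding.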

impl MixedIndex {
    fn new(
        py: Python,
        cindex: PyObject,
        data: PyObject,
        header: u32,
    ) -> PyResult<MixedIndex> {
        // Safety: we keep the buffer around inside the class as `index_mmap`
        let (buf, bytes) = unsafe { mmap_keeparound(py, data)? };

        Self::create_instance(
            py,
            RefCell::new(cindex::Index::new(py, cindex)?),
            RefCell::new(
                hg::index::Index::new(
                    bytes,
                    IndexHeader::parse(&header.to_be_bytes())
                        .expect("default header is broken")
                        .unwrap(),
                )
                .unwrap(),
            ),
            RefCell::new(None),
            RefCell::new(None),
            RefCell::new(None),
            RefCell::new(Some(buf)),
        )
    }

    fn len(&self, py: Python) -> PyResult<usize> {
        let rust_index_len = self.index(py).borrow().len();
        let cindex_len = self.cindex(py).borrow().inner().len(py)?;
        assert_eq!(rust_index_len, cindex_len);
        Ok(cindex_len)
    }

    /// This is scaffolding at this point, but it could also become
    /// a way to start a persistent nodemap or perform a
    /// vacuum / repack operation
    fn fill_nodemap(
        &self,
        py: Python,
        nt: &mut NodeTree,
    ) -> PyResult<PyObject> {
        let index = self.cindex(py).borrow();
        for r in 0..self.len(py)? {
            let rev = Revision(r as BaseRevision);
            // in this case node() won't ever return None
            nt.insert(&*index, index.node(rev).unwrap(), rev)
                .map_err(|e| nodemap_error(py, e))?
        }
        Ok(py.None())
    }

    fn get_nodetree<'a>(
        &'a self,
        py: Python<'a>,
    ) -> PyResult<&'a RefCell<Option<NodeTree>>> {
        if self.nt(py).borrow().is_none() {
            let readonly = Box::<Vec<_>>::default();
            let mut nt = NodeTree::load_bytes(readonly, 0);
            self.fill_nodemap(py, &mut nt)?;
            self.nt(py).borrow_mut().replace(nt);
        }
        Ok(self.nt(py))
    }
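    // Lazily initialized: the first call pays for a full `fill_nodemap`
    // pass (one insert per revision), later calls just borrow the cached
    // tree. `clearcaches` sets `nt` back to `None`, re-arming the rebuild.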

    /// forward a method call to the underlying C index
    fn call_cindex(
        &self,
        py: Python,
        name: &str,
        args: &PyTuple,
        kwargs: Option<&PyDict>,
    ) -> PyResult<PyObject> {
        self.cindex(py)
            .borrow()
            .inner()
            .call_method(py, name, args, kwargs)
    }

    pub fn clone_cindex(&self, py: Python) -> cindex::Index {
        self.cindex(py).borrow().clone_ref(py)
    }

    /// Returns the full nodemap bytes to be written as-is to disk
    fn inner_nodemap_data_all(&self, py: Python) -> PyResult<PyBytes> {
        let nodemap = self.get_nodetree(py)?.borrow_mut().take().unwrap();
        let (readonly, bytes) = nodemap.into_readonly_and_added_bytes();

        // If there's anything readonly, we need to build the data again from
        // scratch
        let bytes = if readonly.len() > 0 {
            let mut nt = NodeTree::load_bytes(Box::<Vec<_>>::default(), 0);
            self.fill_nodemap(py, &mut nt)?;

            let (readonly, bytes) = nt.into_readonly_and_added_bytes();
            assert_eq!(readonly.len(), 0);

            bytes
        } else {
            bytes
        };

        let bytes = PyBytes::new(py, &bytes);
        Ok(bytes)
    }

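    // `take()` consumes the cached nodetree on purpose: serialization
    // moves its buffers out, and the next `get_nodetree` call rebuilds
    // it. The rebuild-from-scratch branch above guarantees the returned
    // bytes are self-contained rather than a delta over mmap'ed data.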
    /// Returns the last saved docket along with the size of any changed data
    /// (in bytes), and said data as bytes.
    fn inner_nodemap_data_incremental(
        &self,
        py: Python,
    ) -> PyResult<PyObject> {
        let docket = self.docket(py).borrow();
        let docket = match docket.as_ref() {
            Some(d) => d,
            None => return Ok(py.None()),
        };

        let node_tree = self.get_nodetree(py)?.borrow_mut().take().unwrap();
        let masked_blocks = node_tree.masked_readonly_blocks();
        let (_, data) = node_tree.into_readonly_and_added_bytes();
        let changed = masked_blocks * std::mem::size_of::<Block>();

        Ok((docket, changed, PyBytes::new(py, &data))
            .to_py_object(py)
            .into_object())
    }

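    // `changed` is a byte count: freshly masked read-only blocks times
    // the in-memory size of one `Block`. For instance, assuming a
    // 64-byte `Block`, 3 masked blocks would report 192 changed bytes.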
    /// Update the nodemap from the new (mmaped) data.
    /// The docket is kept as a reference for later incremental calls.
    fn inner_update_nodemap_data(
        &self,
        py: Python,
        docket: PyObject,
        nm_data: PyObject,
    ) -> PyResult<PyObject> {
        // Safety: we keep the buffer around inside the class as `nodemap_mmap`
        let (buf, bytes) = unsafe { mmap_keeparound(py, nm_data)? };
        let len = buf.item_count();
        self.nodemap_mmap(py).borrow_mut().replace(buf);

        let mut nt = NodeTree::load_bytes(bytes, len);

        let data_tip = docket
            .getattr(py, "tip_rev")?
            .extract::<BaseRevision>(py)?
            .into();
        self.docket(py).borrow_mut().replace(docket.clone_ref(py));
        let idx = self.cindex(py).borrow();
        let data_tip = idx.check_revision(data_tip).ok_or_else(|| {
            nodemap_error(py, NodeMapError::RevisionNotInIndex(data_tip))
        })?;
        let current_tip = idx.len();

        for r in (data_tip.0 + 1)..current_tip as BaseRevision {
            let rev = Revision(r);
            // in this case node() won't ever return None
            nt.insert(&*idx, idx.node(rev).unwrap(), rev)
                .map_err(|e| nodemap_error(py, e))?
        }

        *self.nt(py).borrow_mut() = Some(nt);

        Ok(py.None())
    }
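    // The catch-up loop above covers revisions present in the index but
    // not yet in the persisted nodemap data (those strictly between
    // `data_tip` and `current_tip`): they are inserted one by one before
    // the tree is installed as `self.nt`.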
}

fn revlog_error(py: Python) -> PyErr {
    match py
        .import("mercurial.error")
        .and_then(|m| m.get(py, "RevlogError"))
    {
        Err(e) => e,
        Ok(cls) => PyErr::from_instance(
            py,
            cls.call(py, (py.None(),), None).ok().into_py_object(py),
        ),
    }
}

fn rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
    PyErr::new::<ValueError, _>(
        py,
        format!(
            "Inconsistency: Revision {} found in nodemap \
             is not in revlog index",
            rev
        ),
    )
}

/// Standard treatment of NodeMapError
fn nodemap_error(py: Python, err: NodeMapError) -> PyErr {
    match err {
        NodeMapError::MultipleResults => revlog_error(py),
        NodeMapError::RevisionNotInIndex(r) => rev_not_in_index(py, r),
    }
}

/// Create the module, with __package__ given from parent
pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
    let dotted_name = &format!("{}.revlog", package);
    let m = PyModule::new(py, dotted_name)?;
    m.add(py, "__package__", package)?;
    m.add(py, "__doc__", "RevLog - Rust implementations")?;

    m.add_class::<MixedIndex>(py)?;

    let sys = PyModule::import(py, "sys")?;
    let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
    sys_modules.set_item(py, dotted_name, &m)?;

    Ok(m)
}
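// Hypothetical Python-side view once `init_module` has run (assuming the
// parent package passes itself as `package`, e.g. "mercurial.rustext"):
//
//     from mercurial.rustext.revlog import MixedIndex
//     idx = MixedIndex(cindex, data, header)
//     len(idx)            # dispatches to __len__ above
//     idx.clearcaches()   # drops Rust caches, then forwards to the C index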