##// END OF EJS Templates
rust-revlog: bare minimal NodeTree exposition...
Georges Racinet -
r52142:2966b88d default
parent child Browse files
Show More
@@ -1,1042 +1,1110 b''
1 // revlog.rs
1 // revlog.rs
2 //
2 //
3 // Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
3 // Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::{
8 use crate::{
9 cindex,
9 cindex,
10 conversion::{rev_pyiter_collect, rev_pyiter_collect_or_else},
10 conversion::{rev_pyiter_collect, rev_pyiter_collect_or_else},
11 utils::{node_from_py_bytes, node_from_py_object},
11 utils::{node_from_py_bytes, node_from_py_object},
12 PyRevision,
12 PyRevision,
13 };
13 };
14 use cpython::{
14 use cpython::{
15 buffer::{Element, PyBuffer},
15 buffer::{Element, PyBuffer},
16 exc::{IndexError, ValueError},
16 exc::{IndexError, ValueError},
17 ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyInt, PyList,
17 ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyInt, PyList,
18 PyModule, PyObject, PyResult, PySet, PyString, PyTuple, Python,
18 PyModule, PyObject, PyResult, PySet, PyString, PyTuple, Python,
19 PythonObject, ToPyObject, UnsafePyLeaked,
19 PythonObject, ToPyObject, UnsafePyLeaked,
20 };
20 };
21 use hg::{
21 use hg::{
22 errors::HgError,
22 errors::HgError,
23 index::{
23 index::{
24 IndexHeader, Phase, RevisionDataParams, SnapshotsCache,
24 IndexHeader, Phase, RevisionDataParams, SnapshotsCache,
25 INDEX_ENTRY_SIZE,
25 INDEX_ENTRY_SIZE,
26 },
26 },
27 nodemap::{Block, NodeMapError, NodeTree as CoreNodeTree},
27 nodemap::{Block, NodeMapError, NodeTree as CoreNodeTree},
28 revlog::{nodemap::NodeMap, Graph, NodePrefix, RevlogError, RevlogIndex},
28 revlog::{nodemap::NodeMap, Graph, NodePrefix, RevlogError, RevlogIndex},
29 BaseRevision, Node, Revision, UncheckedRevision, NULL_REVISION,
29 BaseRevision, Node, Revision, UncheckedRevision, NULL_REVISION,
30 };
30 };
31 use std::{cell::RefCell, collections::HashMap};
31 use std::{cell::RefCell, collections::HashMap};
32 use vcsgraph::graph::Graph as VCSGraph;
32 use vcsgraph::graph::Graph as VCSGraph;
33
33
/// A shared view over the core `hg::index::Index`, suitable for handing
/// to Python-facing wrappers.
///
/// The `'static` lifetime is a lie told to the borrow checker: the
/// reference actually lives as long as the `UnsafePyLeaked` that wraps it
/// (see `py_rust_index_to_graph`), which is responsible for keeping the
/// underlying index alive. Never let `inner` escape that wrapper.
pub struct PySharedIndex {
    /// The underlying hg-core index
    pub(crate) inner: &'static hg::index::Index,
}
38
38
/// Return a Struct implementing the Graph trait
///
/// Extracts the Rust-side index out of a Python `MixedIndex` object and
/// leaks an immutable reference to it, wrapped in `UnsafePyLeaked` so that
/// the GIL-based ownership rules are still enforced on the Python side.
pub(crate) fn py_rust_index_to_graph(
    py: Python,
    index: PyObject,
) -> PyResult<UnsafePyLeaked<PySharedIndex>> {
    let midx = index.extract::<MixedIndex>(py)?;
    let leaked = midx.index(py).leak_immutable();
    // SAFETY: the `'static`-faked reference handed to the closure stays
    // encapsulated in the returned `UnsafePyLeaked`, which manages its
    // actual validity — NOTE(review): confirm against `UnsafePyLeaked::map`
    // contract; the reference must not escape `PySharedIndex`.
    Ok(unsafe { leaked.map(py, |idx| PySharedIndex { inner: idx }) })
}
48
48
49 impl Clone for PySharedIndex {
49 impl Clone for PySharedIndex {
50 fn clone(&self) -> Self {
50 fn clone(&self) -> Self {
51 Self { inner: self.inner }
51 Self { inner: self.inner }
52 }
52 }
53 }
53 }
54
54
impl Graph for PySharedIndex {
    /// Parents of `rev`, straight delegation to the underlying hg-core
    /// index (missing parents are `NULL_REVISION`, per the `Graph` trait
    /// convention — not visible here, confirm in hg-core).
    fn parents(&self, rev: Revision) -> Result<[Revision; 2], hg::GraphError> {
        self.inner.parents(rev)
    }
}
60
60
61 impl VCSGraph for PySharedIndex {
61 impl VCSGraph for PySharedIndex {
62 fn parents(
62 fn parents(
63 &self,
63 &self,
64 rev: BaseRevision,
64 rev: BaseRevision,
65 ) -> Result<vcsgraph::graph::Parents, vcsgraph::graph::GraphReadError>
65 ) -> Result<vcsgraph::graph::Parents, vcsgraph::graph::GraphReadError>
66 {
66 {
67 // FIXME This trait should be reworked to decide between Revision
67 // FIXME This trait should be reworked to decide between Revision
68 // and UncheckedRevision, get better errors names, etc.
68 // and UncheckedRevision, get better errors names, etc.
69 match Graph::parents(self, Revision(rev)) {
69 match Graph::parents(self, Revision(rev)) {
70 Ok(parents) => {
70 Ok(parents) => {
71 Ok(vcsgraph::graph::Parents([parents[0].0, parents[1].0]))
71 Ok(vcsgraph::graph::Parents([parents[0].0, parents[1].0]))
72 }
72 }
73 Err(hg::GraphError::ParentOutOfRange(rev)) => {
73 Err(hg::GraphError::ParentOutOfRange(rev)) => {
74 Err(vcsgraph::graph::GraphReadError::KeyedInvalidKey(rev.0))
74 Err(vcsgraph::graph::GraphReadError::KeyedInvalidKey(rev.0))
75 }
75 }
76 }
76 }
77 }
77 }
78 }
78 }
79
79
impl RevlogIndex for PySharedIndex {
    /// Number of revisions in the index.
    fn len(&self) -> usize {
        self.inner.len()
    }

    /// Node ID of `rev`, or `None` when `rev` is out of bounds.
    fn node(&self, rev: Revision) -> Option<&Node> {
        self.inner.node(rev)
    }
}
88
88
py_class!(pub class MixedIndex |py| {
    // Transitional handle on the C index, kept while the pure-Rust index
    // is being brought up to parity.
    data cindex: RefCell<cindex::Index>;
    // The Rust index; `@shared` so it can be leaked to other Rust objects
    // (see `py_rust_index_to_graph`).
    @shared data index: hg::index::Index;
    // Lazily built nodemap; filled by `get_nodetree` (defined elsewhere
    // in this file), hence the `Option`.
    data nt: RefCell<Option<CoreNodeTree>>;
    // Python-side persistent nodemap docket, if any.
    data docket: RefCell<Option<PyObject>>;
    // Holds a reference to the mmap'ed persistent nodemap data
    data nodemap_mmap: RefCell<Option<PyBuffer>>;
    // Holds a reference to the mmap'ed persistent index data
    data index_mmap: RefCell<Option<PyBuffer>>;

    def __new__(
        _cls,
        cindex: PyObject,
        data: PyObject,
        default_header: u32,
    ) -> PyResult<MixedIndex> {
        Self::new(py, cindex, data, default_header)
    }

    /// Compatibility layer used for Python consumers needing access to the C index
    ///
    /// Only use case so far is `scmutil.shortesthexnodeidprefix`,
    /// that may need to build a custom `nodetree`, based on a specified revset.
    /// With a Rust implementation of the nodemap, we will be able to get rid of
    /// this, by exposing our own standalone nodemap class,
    /// ready to accept `MixedIndex`.
    def get_cindex(&self) -> PyResult<PyObject> {
        Ok(self.cindex(py).borrow().inner().clone_ref(py))
    }

    // Index API involving nodemap, as defined in mercurial/pure/parsers.py

    /// Return Revision if found, raises a bare `error.RevlogError`
    /// in case of ambiguity, same as C version does
    def get_rev(&self, node: PyBytes) -> PyResult<Option<PyRevision>> {
        // `get_nodetree` presumably always fills the `Option` before
        // returning, making the `unwrap` safe — defined outside this view,
        // TODO confirm.
        let opt = self.get_nodetree(py)?.borrow();
        let nt = opt.as_ref().unwrap();
        let ridx = &*self.index(py).borrow();
        let node = node_from_py_bytes(py, &node)?;
        let rust_rev =
            nt.find_bin(ridx, node.into()).map_err(|e| nodemap_error(py, e))?;
        Ok(rust_rev.map(Into::into))

    }

    /// same as `get_rev()` but raises a bare `error.RevlogError` if node
    /// is not found.
    ///
    /// No need to repeat `node` in the exception, `mercurial/revlog.py`
    /// will catch and rewrap with it
    def rev(&self, node: PyBytes) -> PyResult<PyRevision> {
        self.get_rev(py, node)?.ok_or_else(|| revlog_error(py))
    }

    /// return True if the node exist in the index
    def has_node(&self, node: PyBytes) -> PyResult<bool> {
        // TODO OPTIM we could avoid a needless conversion here,
        // to do when scaffolding for pure Rust switch is removed,
        // as `get_rev()` currently does the necessary assertions
        self.get_rev(py, node).map(|opt| opt.is_some())
    }

    /// find length of shortest hex nodeid of a binary ID
    def shortest(&self, node: PyBytes) -> PyResult<usize> {
        // See `get_rev` for why the `unwrap` below is expected to be safe.
        let opt = self.get_nodetree(py)?.borrow();
        let nt = opt.as_ref().unwrap();
        let idx = &*self.index(py).borrow();
        match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
        {
            Ok(Some(l)) => Ok(l),
            // `None` means the node is not in the index at all.
            Ok(None) => Err(revlog_error(py)),
            Err(e) => Err(nodemap_error(py, e)),
        }
    }

    /// Resolve a (possibly partial) hex nodeid to the full binary node,
    /// or `None` when no revision matches the prefix.
    def partialmatch(&self, node: PyObject) -> PyResult<Option<PyBytes>> {
        let opt = self.get_nodetree(py)?.borrow();
        let nt = opt.as_ref().unwrap();
        let idx = &*self.index(py).borrow();

        // Python 3 passes the prefix as `str`, Python 2 as `bytes`.
        let node_as_string = if cfg!(feature = "python3-sys") {
            node.cast_as::<PyString>(py)?.to_string(py)?.to_string()
        }
        else {
            let node = node.extract::<PyBytes>(py)?;
            String::from_utf8_lossy(node.data(py)).to_string()
        };

        let prefix = NodePrefix::from_hex(&node_as_string)
            .map_err(|_| PyErr::new::<ValueError, _>(
                py, format!("Invalid node or prefix '{}'", node_as_string))
            )?;

        nt.find_bin(idx, prefix)
            // TODO make an inner API returning the node directly
            .map(|opt| opt.map(
                |rev| PyBytes::new(py, idx.node(rev).unwrap().as_bytes())))
            .map_err(|e| nodemap_error(py, e))

    }

    /// append an index entry
    def append(&self, tup: PyTuple) -> PyResult<PyObject> {
        if tup.len(py) < 8 {
            // this is better than the panic promised by tup.get_item()
            return Err(
                PyErr::new::<IndexError, _>(py, "tuple index out of range"))
        }
        // item 7 of the revlog entry tuple is the binary node id.
        let node_bytes = tup.get_item(py, 7).extract(py)?;
        let node = node_from_py_object(py, &node_bytes)?;

        let rev = self.len(py)? as BaseRevision;

        // This is ok since we will just add the revision to the index
        let rev = Revision(rev);
        self.index(py)
            .borrow_mut()
            .append(py_tuple_to_revision_data_params(py, tup)?)
            .unwrap();
        // Keep the nodemap in sync with the freshly appended revision.
        let idx = &*self.index(py).borrow();
        self.get_nodetree(py)?.borrow_mut().as_mut().unwrap()
            .insert(idx, &node, rev)
            .map_err(|e| nodemap_error(py, e))?;
        Ok(py.None())
    }

    def __delitem__(&self, key: PyObject) -> PyResult<()> {
        // __delitem__ is both for `del idx[r]` and `del idx[r1:r2]`
        // NOTE(review): only `key.start` is read, so the upper bound of a
        // slice is ignored — truncation to the end seems assumed; confirm
        // against callers.
        let start = key.getattr(py, "start")?;
        let start = UncheckedRevision(start.extract(py)?);
        let start = self.index(py)
            .borrow()
            .check_revision(start)
            .ok_or_else(|| {
                nodemap_error(py, NodeMapError::RevisionNotInIndex(start))
            })?;
        self.index(py).borrow_mut().remove(start).unwrap();
        // The nodemap cannot be truncated incrementally: invalidate it
        // and rebuild from the (now shorter) index.
        let mut opt = self.get_nodetree(py)?.borrow_mut();
        let nt = opt.as_mut().unwrap();
        nt.invalidate_all();
        self.fill_nodemap(py, nt)?;
        Ok(())
    }

    //
    // Index methods previously reforwarded to C index (tp_methods)
    // Same ordering as in revlog.c
    //

    /// return the gca set of the given revs
    def ancestors(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_ancestors(py, args)?;
        Ok(rust_res)
    }

    /// return the heads of the common ancestors of the given revs
    def commonancestorsheads(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_commonancestorsheads(py, args)?;
        Ok(rust_res)
    }

    /// Clear the index caches and inner py_class data.
    /// It is Python's responsibility to call `update_nodemap_data` again.
    def clearcaches(&self) -> PyResult<PyObject> {
        self.nt(py).borrow_mut().take();
        self.docket(py).borrow_mut().take();
        self.nodemap_mmap(py).borrow_mut().take();
        self.index(py).borrow().clear_caches();
        Ok(py.None())
    }

    /// return the raw binary string representing a revision
    def entry_binary(&self, *args, **_kw) -> PyResult<PyObject> {
        let rindex = self.index(py).borrow();
        let rev = UncheckedRevision(args.get_item(py, 0).extract(py)?);
        let rust_bytes = rindex.check_revision(rev).and_then(
            |r| rindex.entry_binary(r))
            .ok_or_else(|| rev_not_in_index(py, rev))?;
        let rust_res = PyBytes::new(py, rust_bytes).into_object();
        Ok(rust_res)
    }

    /// return a binary packed version of the header
    def pack_header(&self, *args, **_kw) -> PyResult<PyObject> {
        let rindex = self.index(py).borrow();
        let packed = rindex.pack_header(args.get_item(py, 0).extract(py)?);
        let rust_res = PyBytes::new(py, &packed).into_object();
        Ok(rust_res)
    }

    /// compute phases
    def computephasesmapsets(&self, *args, **_kw) -> PyResult<PyObject> {
        let py_roots = args.get_item(py, 0).extract::<PyDict>(py)?;
        let rust_res = self.inner_computephasesmapsets(py, py_roots)?;
        Ok(rust_res)
    }

    /// reachableroots
    def reachableroots2(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_reachableroots2(
            py,
            UncheckedRevision(args.get_item(py, 0).extract(py)?),
            args.get_item(py, 1),
            args.get_item(py, 2),
            args.get_item(py, 3).extract(py)?,
        )?;
        Ok(rust_res)
    }

    /// get head revisions
    def headrevs(&self) -> PyResult<PyObject> {
        let rust_res = self.inner_headrevs(py)?;
        Ok(rust_res)
    }

    /// get filtered head revisions
    def headrevsfiltered(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_headrevsfiltered(py, &args.get_item(py, 0))?;
        Ok(rust_res)
    }

    /// True if the object is a snapshot
    def issnapshot(&self, *args, **_kw) -> PyResult<bool> {
        let index = self.index(py).borrow();
        let result = index
            .is_snapshot(UncheckedRevision(args.get_item(py, 0).extract(py)?))
            .map_err(|e| {
                PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
            })?;
        Ok(result)
    }

    /// Gather snapshot data in a cache dict
    def findsnapshots(&self, *args, **_kw) -> PyResult<PyObject> {
        let index = self.index(py).borrow();
        let cache: PyDict = args.get_item(py, 0).extract(py)?;
        // this methods operates by setting new values in the cache,
        // hence we will compare results by letting the C implementation
        // operate over a deepcopy of the cache, and finally compare both
        // caches.
        let c_cache = PyDict::new(py);
        for (k, v) in cache.items(py) {
            c_cache.set_item(py, k, PySet::new(py, v)?)?;
        }

        let start_rev = UncheckedRevision(args.get_item(py, 1).extract(py)?);
        let end_rev = UncheckedRevision(args.get_item(py, 2).extract(py)?);
        let mut cache_wrapper = PySnapshotsCache{ py, dict: cache };
        index.find_snapshots(
            start_rev,
            end_rev,
            &mut cache_wrapper,
        ).map_err(|_| revlog_error(py))?;
        Ok(py.None())
    }

    /// determine revisions with deltas to reconstruct fulltext
    def deltachain(&self, *args, **_kw) -> PyResult<PyObject> {
        let index = self.index(py).borrow();
        let rev = args.get_item(py, 0).extract::<BaseRevision>(py)?.into();
        let stop_rev =
            args.get_item(py, 1).extract::<Option<BaseRevision>>(py)?;
        let rev = index.check_revision(rev).ok_or_else(|| {
            nodemap_error(py, NodeMapError::RevisionNotInIndex(rev))
        })?;
        let stop_rev = if let Some(stop_rev) = stop_rev {
            let stop_rev = UncheckedRevision(stop_rev);
            Some(index.check_revision(stop_rev).ok_or_else(|| {
                nodemap_error(py, NodeMapError::RevisionNotInIndex(stop_rev))
            })?)
        } else {None};
        // Third argument is a nullable int; any non-zero value means
        // general delta is in use.
        let using_general_delta = args.get_item(py, 2)
            .extract::<Option<u32>>(py)?
            .map(|i| i != 0);
        let (chain, stopped) = index.delta_chain(
            rev, stop_rev, using_general_delta
        ).map_err(|e| {
            PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
        })?;

        // Unwrap the `Revision` newtypes for the Python-facing tuple.
        let chain: Vec<_> = chain.into_iter().map(|r| r.0).collect();
        Ok(
            PyTuple::new(
                py,
                &[
                    chain.into_py_object(py).into_object(),
                    stopped.into_py_object(py).into_object()
                ]
            ).into_object()
        )

    }

    /// slice planned chunk read to reach a density threshold
    def slicechunktodensity(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_slicechunktodensity(
            py,
            args.get_item(py, 0),
            args.get_item(py, 1).extract(py)?,
            args.get_item(py, 2).extract(py)?
        )?;
        Ok(rust_res)
    }

    // index_sequence_methods and index_mapping_methods.
    //
    // Since we call back through the high level Python API,
    // there's no point making a distinction between index_get
    // and index_getitem.
    // gracinet 2023: this above is no longer true for the pure Rust impl

    def __len__(&self) -> PyResult<usize> {
        self.len(py)
    }

    def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
        let rust_res = self.inner_getitem(py, key.clone_ref(py))?;
        Ok(rust_res)
    }

    def __contains__(&self, item: PyObject) -> PyResult<bool> {
        // ObjectProtocol does not seem to provide contains(), so
        // this is an equivalent implementation of the index_contains()
        // defined in revlog.c
        match item.extract::<i32>(py) {
            Ok(rev) => {
                // -1 is NULL_REVISION, always considered present.
                Ok(rev >= -1 && rev < self.len(py)? as BaseRevision)
            }
            Err(_) => {
                // Not an int: treat it as a binary node id.
                let item_bytes: PyBytes = item.extract(py)?;
                let rust_res = self.has_node(py, item_bytes)?;
                Ok(rust_res)
            }
        }
    }

    /// Full persistent-nodemap serialization (see inner helper).
    def nodemap_data_all(&self) -> PyResult<PyBytes> {
        self.inner_nodemap_data_all(py)
    }

    /// Incremental persistent-nodemap serialization (see inner helper).
    def nodemap_data_incremental(&self) -> PyResult<PyObject> {
        self.inner_nodemap_data_incremental(py)
    }
    def update_nodemap_data(
        &self,
        docket: PyObject,
        nm_data: PyObject
    ) -> PyResult<PyObject> {
        self.inner_update_nodemap_data(py, docket, nm_data)
    }

    @property
    def entry_size(&self) -> PyResult<PyInt> {
        let rust_res: PyInt = INDEX_ENTRY_SIZE.to_py_object(py);
        Ok(rust_res)
    }

    @property
    def rust_ext_compat(&self) -> PyResult<PyInt> {
        // will be entirely removed when the Rust index yet useful to
        // implement in Rust to detangle things when removing `self.cindex`
        let rust_res: PyInt = 1.to_py_object(py);
        Ok(rust_res)
    }

    @property
    def is_rust(&self) -> PyResult<PyBool> {
        // Still a mixed C/Rust index at this stage, hence `false`.
        Ok(false.to_py_object(py))
    }

});
460
460
/// Take a (potentially) mmap'ed buffer, and return the underlying Python
/// buffer along with the Rust slice into said buffer. We need to keep the
/// Python buffer around, otherwise we'd get a dangling pointer once the buffer
/// is freed from Python's side.
///
/// # Safety
///
/// The caller must make sure that the buffer is kept around for at least as
/// long as the slice.
#[deny(unsafe_op_in_unsafe_fn)]
unsafe fn mmap_keeparound(
    py: Python,
    data: PyObject,
) -> PyResult<(
    PyBuffer,
    Box<dyn std::ops::Deref<Target = [u8]> + Send + Sync + 'static>,
)> {
    let buf = PyBuffer::get(py, &data)?;
    let len = buf.item_count();

    // Build a slice from the mmap'ed buffer data
    let cbuf = buf.buf_ptr();
    let bytes = if std::mem::size_of::<u8>() == buf.item_size()
        && buf.is_c_contiguous()
        && u8::is_compatible_format(buf.format())
    {
        // SAFETY: we just checked the buffer is C-contiguous, made of
        // byte-sized items in a u8-compatible format, and `len` is its item
        // count; validity beyond this call is the caller's obligation
        // (see `# Safety` above).
        unsafe { std::slice::from_raw_parts(cbuf as *const u8, len) }
    } else {
        return Err(PyErr::new::<ValueError, _>(
            py,
            "Nodemap data buffer has an invalid memory representation"
                .to_string(),
        ));
    };

    Ok((buf, Box::new(bytes)))
}
498
498
499 fn py_tuple_to_revision_data_params(
499 fn py_tuple_to_revision_data_params(
500 py: Python,
500 py: Python,
501 tuple: PyTuple,
501 tuple: PyTuple,
502 ) -> PyResult<RevisionDataParams> {
502 ) -> PyResult<RevisionDataParams> {
503 if tuple.len(py) < 8 {
503 if tuple.len(py) < 8 {
504 // this is better than the panic promised by tup.get_item()
504 // this is better than the panic promised by tup.get_item()
505 return Err(PyErr::new::<IndexError, _>(
505 return Err(PyErr::new::<IndexError, _>(
506 py,
506 py,
507 "tuple index out of range",
507 "tuple index out of range",
508 ));
508 ));
509 }
509 }
510 let offset_or_flags: u64 = tuple.get_item(py, 0).extract(py)?;
510 let offset_or_flags: u64 = tuple.get_item(py, 0).extract(py)?;
511 let node_id = tuple
511 let node_id = tuple
512 .get_item(py, 7)
512 .get_item(py, 7)
513 .extract::<PyBytes>(py)?
513 .extract::<PyBytes>(py)?
514 .data(py)
514 .data(py)
515 .try_into()
515 .try_into()
516 .unwrap();
516 .unwrap();
517 let flags = (offset_or_flags & 0xFFFF) as u16;
517 let flags = (offset_or_flags & 0xFFFF) as u16;
518 let data_offset = offset_or_flags >> 16;
518 let data_offset = offset_or_flags >> 16;
519 Ok(RevisionDataParams {
519 Ok(RevisionDataParams {
520 flags,
520 flags,
521 data_offset,
521 data_offset,
522 data_compressed_length: tuple.get_item(py, 1).extract(py)?,
522 data_compressed_length: tuple.get_item(py, 1).extract(py)?,
523 data_uncompressed_length: tuple.get_item(py, 2).extract(py)?,
523 data_uncompressed_length: tuple.get_item(py, 2).extract(py)?,
524 data_delta_base: tuple.get_item(py, 3).extract(py)?,
524 data_delta_base: tuple.get_item(py, 3).extract(py)?,
525 link_rev: tuple.get_item(py, 4).extract(py)?,
525 link_rev: tuple.get_item(py, 4).extract(py)?,
526 parent_rev_1: tuple.get_item(py, 5).extract(py)?,
526 parent_rev_1: tuple.get_item(py, 5).extract(py)?,
527 parent_rev_2: tuple.get_item(py, 6).extract(py)?,
527 parent_rev_2: tuple.get_item(py, 6).extract(py)?,
528 node_id,
528 node_id,
529 ..Default::default()
529 ..Default::default()
530 })
530 })
531 }
531 }
532 fn revision_data_params_to_py_tuple(
532 fn revision_data_params_to_py_tuple(
533 py: Python,
533 py: Python,
534 params: RevisionDataParams,
534 params: RevisionDataParams,
535 ) -> PyTuple {
535 ) -> PyTuple {
536 PyTuple::new(
536 PyTuple::new(
537 py,
537 py,
538 &[
538 &[
539 params.data_offset.into_py_object(py).into_object(),
539 params.data_offset.into_py_object(py).into_object(),
540 params
540 params
541 .data_compressed_length
541 .data_compressed_length
542 .into_py_object(py)
542 .into_py_object(py)
543 .into_object(),
543 .into_object(),
544 params
544 params
545 .data_uncompressed_length
545 .data_uncompressed_length
546 .into_py_object(py)
546 .into_py_object(py)
547 .into_object(),
547 .into_object(),
548 params.data_delta_base.into_py_object(py).into_object(),
548 params.data_delta_base.into_py_object(py).into_object(),
549 params.link_rev.into_py_object(py).into_object(),
549 params.link_rev.into_py_object(py).into_object(),
550 params.parent_rev_1.into_py_object(py).into_object(),
550 params.parent_rev_1.into_py_object(py).into_object(),
551 params.parent_rev_2.into_py_object(py).into_object(),
551 params.parent_rev_2.into_py_object(py).into_object(),
552 PyBytes::new(py, &params.node_id)
552 PyBytes::new(py, &params.node_id)
553 .into_py_object(py)
553 .into_py_object(py)
554 .into_object(),
554 .into_object(),
555 params._sidedata_offset.into_py_object(py).into_object(),
555 params._sidedata_offset.into_py_object(py).into_object(),
556 params
556 params
557 ._sidedata_compressed_length
557 ._sidedata_compressed_length
558 .into_py_object(py)
558 .into_py_object(py)
559 .into_object(),
559 .into_object(),
560 params
560 params
561 .data_compression_mode
561 .data_compression_mode
562 .into_py_object(py)
562 .into_py_object(py)
563 .into_object(),
563 .into_object(),
564 params
564 params
565 ._sidedata_compression_mode
565 ._sidedata_compression_mode
566 .into_py_object(py)
566 .into_py_object(py)
567 .into_object(),
567 .into_object(),
568 params._rank.into_py_object(py).into_object(),
568 params._rank.into_py_object(py).into_object(),
569 ],
569 ],
570 )
570 )
571 }
571 }
572
572
573 struct PySnapshotsCache<'p> {
573 struct PySnapshotsCache<'p> {
574 py: Python<'p>,
574 py: Python<'p>,
575 dict: PyDict,
575 dict: PyDict,
576 }
576 }
577
577
578 impl<'p> SnapshotsCache for PySnapshotsCache<'p> {
578 impl<'p> SnapshotsCache for PySnapshotsCache<'p> {
579 fn insert_for(
579 fn insert_for(
580 &mut self,
580 &mut self,
581 rev: BaseRevision,
581 rev: BaseRevision,
582 value: BaseRevision,
582 value: BaseRevision,
583 ) -> Result<(), RevlogError> {
583 ) -> Result<(), RevlogError> {
584 let pyvalue = value.into_py_object(self.py).into_object();
584 let pyvalue = value.into_py_object(self.py).into_object();
585 match self.dict.get_item(self.py, rev) {
585 match self.dict.get_item(self.py, rev) {
586 Some(obj) => obj
586 Some(obj) => obj
587 .extract::<PySet>(self.py)
587 .extract::<PySet>(self.py)
588 .and_then(|set| set.add(self.py, pyvalue)),
588 .and_then(|set| set.add(self.py, pyvalue)),
589 None => PySet::new(self.py, vec![pyvalue])
589 None => PySet::new(self.py, vec![pyvalue])
590 .and_then(|set| self.dict.set_item(self.py, rev, set)),
590 .and_then(|set| self.dict.set_item(self.py, rev, set)),
591 }
591 }
592 .map_err(|_| {
592 .map_err(|_| {
593 RevlogError::Other(HgError::unsupported(
593 RevlogError::Other(HgError::unsupported(
594 "Error in Python caches handling",
594 "Error in Python caches handling",
595 ))
595 ))
596 })
596 })
597 }
597 }
598 }
598 }
599
599
600 impl MixedIndex {
600 impl MixedIndex {
601 fn new(
601 fn new(
602 py: Python,
602 py: Python,
603 cindex: PyObject,
603 cindex: PyObject,
604 data: PyObject,
604 data: PyObject,
605 header: u32,
605 header: u32,
606 ) -> PyResult<MixedIndex> {
606 ) -> PyResult<MixedIndex> {
607 // Safety: we keep the buffer around inside the class as `index_mmap`
607 // Safety: we keep the buffer around inside the class as `index_mmap`
608 let (buf, bytes) = unsafe { mmap_keeparound(py, data)? };
608 let (buf, bytes) = unsafe { mmap_keeparound(py, data)? };
609
609
610 Self::create_instance(
610 Self::create_instance(
611 py,
611 py,
612 RefCell::new(cindex::Index::new(py, cindex)?),
612 RefCell::new(cindex::Index::new(py, cindex)?),
613 hg::index::Index::new(
613 hg::index::Index::new(
614 bytes,
614 bytes,
615 IndexHeader::parse(&header.to_be_bytes())
615 IndexHeader::parse(&header.to_be_bytes())
616 .expect("default header is broken")
616 .expect("default header is broken")
617 .unwrap(),
617 .unwrap(),
618 )
618 )
619 .map_err(|e| {
619 .map_err(|e| {
620 revlog_error_with_msg(py, e.to_string().as_bytes())
620 revlog_error_with_msg(py, e.to_string().as_bytes())
621 })?,
621 })?,
622 RefCell::new(None),
622 RefCell::new(None),
623 RefCell::new(None),
623 RefCell::new(None),
624 RefCell::new(None),
624 RefCell::new(None),
625 RefCell::new(Some(buf)),
625 RefCell::new(Some(buf)),
626 )
626 )
627 }
627 }
628
628
629 fn len(&self, py: Python) -> PyResult<usize> {
629 fn len(&self, py: Python) -> PyResult<usize> {
630 let rust_index_len = self.index(py).borrow().len();
630 let rust_index_len = self.index(py).borrow().len();
631 Ok(rust_index_len)
631 Ok(rust_index_len)
632 }
632 }
633
633
634 /// This is scaffolding at this point, but it could also become
634 /// This is scaffolding at this point, but it could also become
635 /// a way to start a persistent nodemap or perform a
635 /// a way to start a persistent nodemap or perform a
636 /// vacuum / repack operation
636 /// vacuum / repack operation
637 fn fill_nodemap(
637 fn fill_nodemap(
638 &self,
638 &self,
639 py: Python,
639 py: Python,
640 nt: &mut CoreNodeTree,
640 nt: &mut CoreNodeTree,
641 ) -> PyResult<PyObject> {
641 ) -> PyResult<PyObject> {
642 let index = self.index(py).borrow();
642 let index = self.index(py).borrow();
643 for r in 0..self.len(py)? {
643 for r in 0..self.len(py)? {
644 let rev = Revision(r as BaseRevision);
644 let rev = Revision(r as BaseRevision);
645 // in this case node() won't ever return None
645 // in this case node() won't ever return None
646 nt.insert(&*index, index.node(rev).unwrap(), rev)
646 nt.insert(&*index, index.node(rev).unwrap(), rev)
647 .map_err(|e| nodemap_error(py, e))?
647 .map_err(|e| nodemap_error(py, e))?
648 }
648 }
649 Ok(py.None())
649 Ok(py.None())
650 }
650 }
651
651
652 fn get_nodetree<'a>(
652 fn get_nodetree<'a>(
653 &'a self,
653 &'a self,
654 py: Python<'a>,
654 py: Python<'a>,
655 ) -> PyResult<&'a RefCell<Option<CoreNodeTree>>> {
655 ) -> PyResult<&'a RefCell<Option<CoreNodeTree>>> {
656 if self.nt(py).borrow().is_none() {
656 if self.nt(py).borrow().is_none() {
657 let readonly = Box::<Vec<_>>::default();
657 let readonly = Box::<Vec<_>>::default();
658 let mut nt = CoreNodeTree::load_bytes(readonly, 0);
658 let mut nt = CoreNodeTree::load_bytes(readonly, 0);
659 self.fill_nodemap(py, &mut nt)?;
659 self.fill_nodemap(py, &mut nt)?;
660 self.nt(py).borrow_mut().replace(nt);
660 self.nt(py).borrow_mut().replace(nt);
661 }
661 }
662 Ok(self.nt(py))
662 Ok(self.nt(py))
663 }
663 }
664
664
665 pub fn clone_cindex(&self, py: Python) -> cindex::Index {
665 pub fn clone_cindex(&self, py: Python) -> cindex::Index {
666 self.cindex(py).borrow().clone_ref(py)
666 self.cindex(py).borrow().clone_ref(py)
667 }
667 }
668
668
669 /// Returns the full nodemap bytes to be written as-is to disk
669 /// Returns the full nodemap bytes to be written as-is to disk
670 fn inner_nodemap_data_all(&self, py: Python) -> PyResult<PyBytes> {
670 fn inner_nodemap_data_all(&self, py: Python) -> PyResult<PyBytes> {
671 let nodemap = self.get_nodetree(py)?.borrow_mut().take().unwrap();
671 let nodemap = self.get_nodetree(py)?.borrow_mut().take().unwrap();
672 let (readonly, bytes) = nodemap.into_readonly_and_added_bytes();
672 let (readonly, bytes) = nodemap.into_readonly_and_added_bytes();
673
673
674 // If there's anything readonly, we need to build the data again from
674 // If there's anything readonly, we need to build the data again from
675 // scratch
675 // scratch
676 let bytes = if readonly.len() > 0 {
676 let bytes = if readonly.len() > 0 {
677 let mut nt = CoreNodeTree::load_bytes(Box::<Vec<_>>::default(), 0);
677 let mut nt = CoreNodeTree::load_bytes(Box::<Vec<_>>::default(), 0);
678 self.fill_nodemap(py, &mut nt)?;
678 self.fill_nodemap(py, &mut nt)?;
679
679
680 let (readonly, bytes) = nt.into_readonly_and_added_bytes();
680 let (readonly, bytes) = nt.into_readonly_and_added_bytes();
681 assert_eq!(readonly.len(), 0);
681 assert_eq!(readonly.len(), 0);
682
682
683 bytes
683 bytes
684 } else {
684 } else {
685 bytes
685 bytes
686 };
686 };
687
687
688 let bytes = PyBytes::new(py, &bytes);
688 let bytes = PyBytes::new(py, &bytes);
689 Ok(bytes)
689 Ok(bytes)
690 }
690 }
691
691
692 /// Returns the last saved docket along with the size of any changed data
692 /// Returns the last saved docket along with the size of any changed data
693 /// (in number of blocks), and said data as bytes.
693 /// (in number of blocks), and said data as bytes.
694 fn inner_nodemap_data_incremental(
694 fn inner_nodemap_data_incremental(
695 &self,
695 &self,
696 py: Python,
696 py: Python,
697 ) -> PyResult<PyObject> {
697 ) -> PyResult<PyObject> {
698 let docket = self.docket(py).borrow();
698 let docket = self.docket(py).borrow();
699 let docket = match docket.as_ref() {
699 let docket = match docket.as_ref() {
700 Some(d) => d,
700 Some(d) => d,
701 None => return Ok(py.None()),
701 None => return Ok(py.None()),
702 };
702 };
703
703
704 let node_tree = self.get_nodetree(py)?.borrow_mut().take().unwrap();
704 let node_tree = self.get_nodetree(py)?.borrow_mut().take().unwrap();
705 let masked_blocks = node_tree.masked_readonly_blocks();
705 let masked_blocks = node_tree.masked_readonly_blocks();
706 let (_, data) = node_tree.into_readonly_and_added_bytes();
706 let (_, data) = node_tree.into_readonly_and_added_bytes();
707 let changed = masked_blocks * std::mem::size_of::<Block>();
707 let changed = masked_blocks * std::mem::size_of::<Block>();
708
708
709 Ok((docket, changed, PyBytes::new(py, &data))
709 Ok((docket, changed, PyBytes::new(py, &data))
710 .to_py_object(py)
710 .to_py_object(py)
711 .into_object())
711 .into_object())
712 }
712 }
713
713
714 /// Update the nodemap from the new (mmaped) data.
714 /// Update the nodemap from the new (mmaped) data.
715 /// The docket is kept as a reference for later incremental calls.
715 /// The docket is kept as a reference for later incremental calls.
716 fn inner_update_nodemap_data(
716 fn inner_update_nodemap_data(
717 &self,
717 &self,
718 py: Python,
718 py: Python,
719 docket: PyObject,
719 docket: PyObject,
720 nm_data: PyObject,
720 nm_data: PyObject,
721 ) -> PyResult<PyObject> {
721 ) -> PyResult<PyObject> {
722 // Safety: we keep the buffer around inside the class as `nodemap_mmap`
722 // Safety: we keep the buffer around inside the class as `nodemap_mmap`
723 let (buf, bytes) = unsafe { mmap_keeparound(py, nm_data)? };
723 let (buf, bytes) = unsafe { mmap_keeparound(py, nm_data)? };
724 let len = buf.item_count();
724 let len = buf.item_count();
725 self.nodemap_mmap(py).borrow_mut().replace(buf);
725 self.nodemap_mmap(py).borrow_mut().replace(buf);
726
726
727 let mut nt = CoreNodeTree::load_bytes(bytes, len);
727 let mut nt = CoreNodeTree::load_bytes(bytes, len);
728
728
729 let data_tip = docket
729 let data_tip = docket
730 .getattr(py, "tip_rev")?
730 .getattr(py, "tip_rev")?
731 .extract::<BaseRevision>(py)?
731 .extract::<BaseRevision>(py)?
732 .into();
732 .into();
733 self.docket(py).borrow_mut().replace(docket.clone_ref(py));
733 self.docket(py).borrow_mut().replace(docket.clone_ref(py));
734 let idx = self.index(py).borrow();
734 let idx = self.index(py).borrow();
735 let data_tip = idx.check_revision(data_tip).ok_or_else(|| {
735 let data_tip = idx.check_revision(data_tip).ok_or_else(|| {
736 nodemap_error(py, NodeMapError::RevisionNotInIndex(data_tip))
736 nodemap_error(py, NodeMapError::RevisionNotInIndex(data_tip))
737 })?;
737 })?;
738 let current_tip = idx.len();
738 let current_tip = idx.len();
739
739
740 for r in (data_tip.0 + 1)..current_tip as BaseRevision {
740 for r in (data_tip.0 + 1)..current_tip as BaseRevision {
741 let rev = Revision(r);
741 let rev = Revision(r);
742 // in this case node() won't ever return None
742 // in this case node() won't ever return None
743 nt.insert(&*idx, idx.node(rev).unwrap(), rev)
743 nt.insert(&*idx, idx.node(rev).unwrap(), rev)
744 .map_err(|e| nodemap_error(py, e))?
744 .map_err(|e| nodemap_error(py, e))?
745 }
745 }
746
746
747 *self.nt(py).borrow_mut() = Some(nt);
747 *self.nt(py).borrow_mut() = Some(nt);
748
748
749 Ok(py.None())
749 Ok(py.None())
750 }
750 }
751
751
752 fn inner_getitem(&self, py: Python, key: PyObject) -> PyResult<PyObject> {
752 fn inner_getitem(&self, py: Python, key: PyObject) -> PyResult<PyObject> {
753 let idx = self.index(py).borrow();
753 let idx = self.index(py).borrow();
754 Ok(match key.extract::<BaseRevision>(py) {
754 Ok(match key.extract::<BaseRevision>(py) {
755 Ok(key_as_int) => {
755 Ok(key_as_int) => {
756 let entry_params = if key_as_int == NULL_REVISION.0 {
756 let entry_params = if key_as_int == NULL_REVISION.0 {
757 RevisionDataParams::default()
757 RevisionDataParams::default()
758 } else {
758 } else {
759 let rev = UncheckedRevision(key_as_int);
759 let rev = UncheckedRevision(key_as_int);
760 match idx.entry_as_params(rev) {
760 match idx.entry_as_params(rev) {
761 Some(e) => e,
761 Some(e) => e,
762 None => {
762 None => {
763 return Err(PyErr::new::<IndexError, _>(
763 return Err(PyErr::new::<IndexError, _>(
764 py,
764 py,
765 "revlog index out of range",
765 "revlog index out of range",
766 ));
766 ));
767 }
767 }
768 }
768 }
769 };
769 };
770 revision_data_params_to_py_tuple(py, entry_params)
770 revision_data_params_to_py_tuple(py, entry_params)
771 .into_object()
771 .into_object()
772 }
772 }
773 _ => self.get_rev(py, key.extract::<PyBytes>(py)?)?.map_or_else(
773 _ => self.get_rev(py, key.extract::<PyBytes>(py)?)?.map_or_else(
774 || py.None(),
774 || py.None(),
775 |py_rev| py_rev.into_py_object(py).into_object(),
775 |py_rev| py_rev.into_py_object(py).into_object(),
776 ),
776 ),
777 })
777 })
778 }
778 }
779
779
780 fn inner_headrevs(&self, py: Python) -> PyResult<PyObject> {
780 fn inner_headrevs(&self, py: Python) -> PyResult<PyObject> {
781 let index = &*self.index(py).borrow();
781 let index = &*self.index(py).borrow();
782 let as_vec: Vec<PyObject> = index
782 let as_vec: Vec<PyObject> = index
783 .head_revs()
783 .head_revs()
784 .map_err(|e| graph_error(py, e))?
784 .map_err(|e| graph_error(py, e))?
785 .iter()
785 .iter()
786 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
786 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
787 .collect();
787 .collect();
788 Ok(PyList::new(py, &as_vec).into_object())
788 Ok(PyList::new(py, &as_vec).into_object())
789 }
789 }
790
790
791 fn inner_headrevsfiltered(
791 fn inner_headrevsfiltered(
792 &self,
792 &self,
793 py: Python,
793 py: Python,
794 filtered_revs: &PyObject,
794 filtered_revs: &PyObject,
795 ) -> PyResult<PyObject> {
795 ) -> PyResult<PyObject> {
796 let index = &mut *self.index(py).borrow_mut();
796 let index = &mut *self.index(py).borrow_mut();
797 let filtered_revs = rev_pyiter_collect(py, filtered_revs, index)?;
797 let filtered_revs = rev_pyiter_collect(py, filtered_revs, index)?;
798
798
799 let as_vec: Vec<PyObject> = index
799 let as_vec: Vec<PyObject> = index
800 .head_revs_filtered(&filtered_revs)
800 .head_revs_filtered(&filtered_revs)
801 .map_err(|e| graph_error(py, e))?
801 .map_err(|e| graph_error(py, e))?
802 .iter()
802 .iter()
803 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
803 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
804 .collect();
804 .collect();
805 Ok(PyList::new(py, &as_vec).into_object())
805 Ok(PyList::new(py, &as_vec).into_object())
806 }
806 }
807
807
808 fn inner_ancestors(
808 fn inner_ancestors(
809 &self,
809 &self,
810 py: Python,
810 py: Python,
811 py_revs: &PyTuple,
811 py_revs: &PyTuple,
812 ) -> PyResult<PyObject> {
812 ) -> PyResult<PyObject> {
813 let index = &*self.index(py).borrow();
813 let index = &*self.index(py).borrow();
814 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
814 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
815 let as_vec: Vec<_> = index
815 let as_vec: Vec<_> = index
816 .ancestors(&revs)
816 .ancestors(&revs)
817 .map_err(|e| graph_error(py, e))?
817 .map_err(|e| graph_error(py, e))?
818 .iter()
818 .iter()
819 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
819 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
820 .collect();
820 .collect();
821 Ok(PyList::new(py, &as_vec).into_object())
821 Ok(PyList::new(py, &as_vec).into_object())
822 }
822 }
823
823
824 fn inner_commonancestorsheads(
824 fn inner_commonancestorsheads(
825 &self,
825 &self,
826 py: Python,
826 py: Python,
827 py_revs: &PyTuple,
827 py_revs: &PyTuple,
828 ) -> PyResult<PyObject> {
828 ) -> PyResult<PyObject> {
829 let index = &*self.index(py).borrow();
829 let index = &*self.index(py).borrow();
830 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
830 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
831 let as_vec: Vec<_> = index
831 let as_vec: Vec<_> = index
832 .common_ancestor_heads(&revs)
832 .common_ancestor_heads(&revs)
833 .map_err(|e| graph_error(py, e))?
833 .map_err(|e| graph_error(py, e))?
834 .iter()
834 .iter()
835 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
835 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
836 .collect();
836 .collect();
837 Ok(PyList::new(py, &as_vec).into_object())
837 Ok(PyList::new(py, &as_vec).into_object())
838 }
838 }
839
839
840 fn inner_computephasesmapsets(
840 fn inner_computephasesmapsets(
841 &self,
841 &self,
842 py: Python,
842 py: Python,
843 py_roots: PyDict,
843 py_roots: PyDict,
844 ) -> PyResult<PyObject> {
844 ) -> PyResult<PyObject> {
845 let index = &*self.index(py).borrow();
845 let index = &*self.index(py).borrow();
846 let opt = self.get_nodetree(py)?.borrow();
846 let opt = self.get_nodetree(py)?.borrow();
847 let nt = opt.as_ref().unwrap();
847 let nt = opt.as_ref().unwrap();
848 let roots: Result<HashMap<Phase, Vec<Revision>>, PyErr> = py_roots
848 let roots: Result<HashMap<Phase, Vec<Revision>>, PyErr> = py_roots
849 .items_list(py)
849 .items_list(py)
850 .iter(py)
850 .iter(py)
851 .map(|r| {
851 .map(|r| {
852 let phase = r.get_item(py, 0)?;
852 let phase = r.get_item(py, 0)?;
853 let nodes = r.get_item(py, 1)?;
853 let nodes = r.get_item(py, 1)?;
854 // Transform the nodes from Python to revs here since we
854 // Transform the nodes from Python to revs here since we
855 // have access to the nodemap
855 // have access to the nodemap
856 let revs: Result<_, _> = nodes
856 let revs: Result<_, _> = nodes
857 .iter(py)?
857 .iter(py)?
858 .map(|node| match node?.extract::<PyBytes>(py) {
858 .map(|node| match node?.extract::<PyBytes>(py) {
859 Ok(py_bytes) => {
859 Ok(py_bytes) => {
860 let node = node_from_py_bytes(py, &py_bytes)?;
860 let node = node_from_py_bytes(py, &py_bytes)?;
861 nt.find_bin(index, node.into())
861 nt.find_bin(index, node.into())
862 .map_err(|e| nodemap_error(py, e))?
862 .map_err(|e| nodemap_error(py, e))?
863 .ok_or_else(|| revlog_error(py))
863 .ok_or_else(|| revlog_error(py))
864 }
864 }
865 Err(e) => Err(e),
865 Err(e) => Err(e),
866 })
866 })
867 .collect();
867 .collect();
868 let phase = Phase::try_from(phase.extract::<usize>(py)?)
868 let phase = Phase::try_from(phase.extract::<usize>(py)?)
869 .map_err(|_| revlog_error(py));
869 .map_err(|_| revlog_error(py));
870 Ok((phase?, revs?))
870 Ok((phase?, revs?))
871 })
871 })
872 .collect();
872 .collect();
873 let (len, phase_maps) = index
873 let (len, phase_maps) = index
874 .compute_phases_map_sets(roots?)
874 .compute_phases_map_sets(roots?)
875 .map_err(|e| graph_error(py, e))?;
875 .map_err(|e| graph_error(py, e))?;
876
876
877 // Ugly hack, but temporary
877 // Ugly hack, but temporary
878 const IDX_TO_PHASE_NUM: [usize; 4] = [1, 2, 32, 96];
878 const IDX_TO_PHASE_NUM: [usize; 4] = [1, 2, 32, 96];
879 let py_phase_maps = PyDict::new(py);
879 let py_phase_maps = PyDict::new(py);
880 for (idx, roots) in phase_maps.iter().enumerate() {
880 for (idx, roots) in phase_maps.iter().enumerate() {
881 let phase_num = IDX_TO_PHASE_NUM[idx].into_py_object(py);
881 let phase_num = IDX_TO_PHASE_NUM[idx].into_py_object(py);
882 // OPTIM too bad we have to collect here. At least, we could
882 // OPTIM too bad we have to collect here. At least, we could
883 // reuse the same Vec and allocate it with capacity at
883 // reuse the same Vec and allocate it with capacity at
884 // max(len(phase_maps)
884 // max(len(phase_maps)
885 let roots_vec: Vec<PyInt> = roots
885 let roots_vec: Vec<PyInt> = roots
886 .iter()
886 .iter()
887 .map(|r| PyRevision::from(*r).into_py_object(py))
887 .map(|r| PyRevision::from(*r).into_py_object(py))
888 .collect();
888 .collect();
889 py_phase_maps.set_item(
889 py_phase_maps.set_item(
890 py,
890 py,
891 phase_num,
891 phase_num,
892 PySet::new(py, roots_vec)?,
892 PySet::new(py, roots_vec)?,
893 )?;
893 )?;
894 }
894 }
895 Ok(PyTuple::new(
895 Ok(PyTuple::new(
896 py,
896 py,
897 &[
897 &[
898 len.into_py_object(py).into_object(),
898 len.into_py_object(py).into_object(),
899 py_phase_maps.into_object(),
899 py_phase_maps.into_object(),
900 ],
900 ],
901 )
901 )
902 .into_object())
902 .into_object())
903 }
903 }
904
904
905 fn inner_slicechunktodensity(
905 fn inner_slicechunktodensity(
906 &self,
906 &self,
907 py: Python,
907 py: Python,
908 revs: PyObject,
908 revs: PyObject,
909 target_density: f64,
909 target_density: f64,
910 min_gap_size: usize,
910 min_gap_size: usize,
911 ) -> PyResult<PyObject> {
911 ) -> PyResult<PyObject> {
912 let index = &*self.index(py).borrow();
912 let index = &*self.index(py).borrow();
913 let revs: Vec<_> = rev_pyiter_collect(py, &revs, index)?;
913 let revs: Vec<_> = rev_pyiter_collect(py, &revs, index)?;
914 let as_nested_vec =
914 let as_nested_vec =
915 index.slice_chunk_to_density(&revs, target_density, min_gap_size);
915 index.slice_chunk_to_density(&revs, target_density, min_gap_size);
916 let mut res = Vec::with_capacity(as_nested_vec.len());
916 let mut res = Vec::with_capacity(as_nested_vec.len());
917 let mut py_chunk = Vec::new();
917 let mut py_chunk = Vec::new();
918 for chunk in as_nested_vec {
918 for chunk in as_nested_vec {
919 py_chunk.clear();
919 py_chunk.clear();
920 py_chunk.reserve_exact(chunk.len());
920 py_chunk.reserve_exact(chunk.len());
921 for rev in chunk {
921 for rev in chunk {
922 py_chunk.push(
922 py_chunk.push(
923 PyRevision::from(rev).into_py_object(py).into_object(),
923 PyRevision::from(rev).into_py_object(py).into_object(),
924 );
924 );
925 }
925 }
926 res.push(PyList::new(py, &py_chunk).into_object());
926 res.push(PyList::new(py, &py_chunk).into_object());
927 }
927 }
928 // This is just to do the same as C, not sure why it does this
928 // This is just to do the same as C, not sure why it does this
929 if res.len() == 1 {
929 if res.len() == 1 {
930 Ok(PyTuple::new(py, &res).into_object())
930 Ok(PyTuple::new(py, &res).into_object())
931 } else {
931 } else {
932 Ok(PyList::new(py, &res).into_object())
932 Ok(PyList::new(py, &res).into_object())
933 }
933 }
934 }
934 }
935
935
936 fn inner_reachableroots2(
936 fn inner_reachableroots2(
937 &self,
937 &self,
938 py: Python,
938 py: Python,
939 min_root: UncheckedRevision,
939 min_root: UncheckedRevision,
940 heads: PyObject,
940 heads: PyObject,
941 roots: PyObject,
941 roots: PyObject,
942 include_path: bool,
942 include_path: bool,
943 ) -> PyResult<PyObject> {
943 ) -> PyResult<PyObject> {
944 let index = &*self.index(py).borrow();
944 let index = &*self.index(py).borrow();
945 let heads = rev_pyiter_collect_or_else(py, &heads, index, |_rev| {
945 let heads = rev_pyiter_collect_or_else(py, &heads, index, |_rev| {
946 PyErr::new::<IndexError, _>(py, "head out of range")
946 PyErr::new::<IndexError, _>(py, "head out of range")
947 })?;
947 })?;
948 let roots: Result<_, _> = roots
948 let roots: Result<_, _> = roots
949 .iter(py)?
949 .iter(py)?
950 .map(|r| {
950 .map(|r| {
951 r.and_then(|o| match o.extract::<PyRevision>(py) {
951 r.and_then(|o| match o.extract::<PyRevision>(py) {
952 Ok(r) => Ok(UncheckedRevision(r.0)),
952 Ok(r) => Ok(UncheckedRevision(r.0)),
953 Err(e) => Err(e),
953 Err(e) => Err(e),
954 })
954 })
955 })
955 })
956 .collect();
956 .collect();
957 let as_set = index
957 let as_set = index
958 .reachable_roots(min_root, heads, roots?, include_path)
958 .reachable_roots(min_root, heads, roots?, include_path)
959 .map_err(|e| graph_error(py, e))?;
959 .map_err(|e| graph_error(py, e))?;
960 let as_vec: Vec<PyObject> = as_set
960 let as_vec: Vec<PyObject> = as_set
961 .iter()
961 .iter()
962 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
962 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
963 .collect();
963 .collect();
964 Ok(PyList::new(py, &as_vec).into_object())
964 Ok(PyList::new(py, &as_vec).into_object())
965 }
965 }
966 }
966 }
967
967
968 py_class!(pub class NodeTree |py| {
969 data nt: RefCell<CoreNodeTree>;
970 data index: RefCell<UnsafePyLeaked<PySharedIndex>>;
971
972 def __new__(_cls, index: PyObject) -> PyResult<NodeTree> {
973 let index = py_rust_index_to_graph(py, index)?;
974 let nt = CoreNodeTree::default(); // in-RAM, fully mutable
975 Self::create_instance(py, RefCell::new(nt), RefCell::new(index))
976 }
977
978 def insert(&self, rev: PyRevision) -> PyResult<PyObject> {
979 let leaked = self.index(py).borrow();
980 let index = &*unsafe { leaked.try_borrow(py)? };
981
982 let rev = UncheckedRevision(rev.0);
983 let rev = index
984 .check_revision(rev)
985 .ok_or_else(|| rev_not_in_index(py, rev))?;
986 if rev == NULL_REVISION {
987 return Err(rev_not_in_index(py, rev.into()))
988 }
989
990 let entry = index.inner.get_entry(rev).unwrap();
991 let mut nt = self.nt(py).borrow_mut();
992 nt.insert(index, entry.hash(), rev).map_err(|e| nodemap_error(py, e))?;
993
994 Ok(py.None())
995 }
996
997 /// Lookup by node hex prefix in the NodeTree, returning revision number.
998 ///
999 /// This is not part of the classical NodeTree API, but is good enough
1000 /// for unit testing, as in `test-rust-revlog.py`.
1001 def prefix_rev_lookup(
1002 &self,
1003 node_prefix: PyBytes
1004 ) -> PyResult<Option<PyRevision>> {
1005 let prefix = NodePrefix::from_hex(node_prefix.data(py))
1006 .map_err(|_| PyErr::new::<ValueError, _>(
1007 py,
1008 format!("Invalid node or prefix {:?}",
1009 node_prefix.as_object()))
1010 )?;
1011
1012 let nt = self.nt(py).borrow();
1013 let leaked = self.index(py).borrow();
1014 let index = &*unsafe { leaked.try_borrow(py)? };
1015
1016 Ok(nt.find_bin(index, prefix)
1017 .map_err(|e| nodemap_error(py, e))?
1018 .map(|r| r.into())
1019 )
1020 }
1021
1022 def shortest(&self, node: PyBytes) -> PyResult<usize> {
1023 let nt = self.nt(py).borrow();
1024 let leaked = self.index(py).borrow();
1025 let idx = &*unsafe { leaked.try_borrow(py)? };
1026 match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
1027 {
1028 Ok(Some(l)) => Ok(l),
1029 Ok(None) => Err(revlog_error(py)),
1030 Err(e) => Err(nodemap_error(py, e)),
1031 }
1032 }
1033 });
1034
968 fn revlog_error(py: Python) -> PyErr {
1035 fn revlog_error(py: Python) -> PyErr {
969 match py
1036 match py
970 .import("mercurial.error")
1037 .import("mercurial.error")
971 .and_then(|m| m.get(py, "RevlogError"))
1038 .and_then(|m| m.get(py, "RevlogError"))
972 {
1039 {
973 Err(e) => e,
1040 Err(e) => e,
974 Ok(cls) => PyErr::from_instance(
1041 Ok(cls) => PyErr::from_instance(
975 py,
1042 py,
976 cls.call(py, (py.None(),), None).ok().into_py_object(py),
1043 cls.call(py, (py.None(),), None).ok().into_py_object(py),
977 ),
1044 ),
978 }
1045 }
979 }
1046 }
980
1047
981 fn revlog_error_with_msg(py: Python, msg: &[u8]) -> PyErr {
1048 fn revlog_error_with_msg(py: Python, msg: &[u8]) -> PyErr {
982 match py
1049 match py
983 .import("mercurial.error")
1050 .import("mercurial.error")
984 .and_then(|m| m.get(py, "RevlogError"))
1051 .and_then(|m| m.get(py, "RevlogError"))
985 {
1052 {
986 Err(e) => e,
1053 Err(e) => e,
987 Ok(cls) => PyErr::from_instance(
1054 Ok(cls) => PyErr::from_instance(
988 py,
1055 py,
989 cls.call(py, (PyBytes::new(py, msg),), None)
1056 cls.call(py, (PyBytes::new(py, msg),), None)
990 .ok()
1057 .ok()
991 .into_py_object(py),
1058 .into_py_object(py),
992 ),
1059 ),
993 }
1060 }
994 }
1061 }
995
1062
996 fn graph_error(py: Python, _err: hg::GraphError) -> PyErr {
1063 fn graph_error(py: Python, _err: hg::GraphError) -> PyErr {
997 // ParentOutOfRange is currently the only alternative
1064 // ParentOutOfRange is currently the only alternative
998 // in `hg::GraphError`. The C index always raises this simple ValueError.
1065 // in `hg::GraphError`. The C index always raises this simple ValueError.
999 PyErr::new::<ValueError, _>(py, "parent out of range")
1066 PyErr::new::<ValueError, _>(py, "parent out of range")
1000 }
1067 }
1001
1068
1002 fn nodemap_rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1069 fn nodemap_rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1003 PyErr::new::<ValueError, _>(
1070 PyErr::new::<ValueError, _>(
1004 py,
1071 py,
1005 format!(
1072 format!(
1006 "Inconsistency: Revision {} found in nodemap \
1073 "Inconsistency: Revision {} found in nodemap \
1007 is not in revlog index",
1074 is not in revlog index",
1008 rev
1075 rev
1009 ),
1076 ),
1010 )
1077 )
1011 }
1078 }
1012
1079
1013 fn rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1080 fn rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1014 PyErr::new::<ValueError, _>(
1081 PyErr::new::<ValueError, _>(
1015 py,
1082 py,
1016 format!("revlog index out of range: {}", rev),
1083 format!("revlog index out of range: {}", rev),
1017 )
1084 )
1018 }
1085 }
1019
1086
1020 /// Standard treatment of NodeMapError
1087 /// Standard treatment of NodeMapError
1021 fn nodemap_error(py: Python, err: NodeMapError) -> PyErr {
1088 fn nodemap_error(py: Python, err: NodeMapError) -> PyErr {
1022 match err {
1089 match err {
1023 NodeMapError::MultipleResults => revlog_error(py),
1090 NodeMapError::MultipleResults => revlog_error(py),
1024 NodeMapError::RevisionNotInIndex(r) => nodemap_rev_not_in_index(py, r),
1091 NodeMapError::RevisionNotInIndex(r) => nodemap_rev_not_in_index(py, r),
1025 }
1092 }
1026 }
1093 }
1027
1094
1028 /// Create the module, with __package__ given from parent
1095 /// Create the module, with __package__ given from parent
1029 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
1096 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
1030 let dotted_name = &format!("{}.revlog", package);
1097 let dotted_name = &format!("{}.revlog", package);
1031 let m = PyModule::new(py, dotted_name)?;
1098 let m = PyModule::new(py, dotted_name)?;
1032 m.add(py, "__package__", package)?;
1099 m.add(py, "__package__", package)?;
1033 m.add(py, "__doc__", "RevLog - Rust implementations")?;
1100 m.add(py, "__doc__", "RevLog - Rust implementations")?;
1034
1101
1035 m.add_class::<MixedIndex>(py)?;
1102 m.add_class::<MixedIndex>(py)?;
1103 m.add_class::<NodeTree>(py)?;
1036
1104
1037 let sys = PyModule::import(py, "sys")?;
1105 let sys = PyModule::import(py, "sys")?;
1038 let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
1106 let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
1039 sys_modules.set_item(py, dotted_name, &m)?;
1107 sys_modules.set_item(py, dotted_name, &m)?;
1040
1108
1041 Ok(m)
1109 Ok(m)
1042 }
1110 }
@@ -1,63 +1,94 b''
1 import struct
1 import struct
2 import unittest
2 import unittest
3
3
4 from mercurial.node import hex
5
4 try:
6 try:
5 from mercurial import rustext
7 from mercurial import rustext
6
8
7 rustext.__name__ # trigger immediate actual import
9 rustext.__name__ # trigger immediate actual import
8 except ImportError:
10 except ImportError:
9 rustext = None
11 rustext = None
10 else:
12 else:
11 from mercurial.rustext import revlog
13 from mercurial.rustext import revlog
12
14
13 # this would fail already without appropriate ancestor.__package__
15 # this would fail already without appropriate ancestor.__package__
14 from mercurial.rustext.ancestor import LazyAncestors
16 from mercurial.rustext.ancestor import LazyAncestors
15
17
16 from mercurial.testing import revlog as revlogtesting
18 from mercurial.testing import revlog as revlogtesting
17
19
# Big-endian u32 at offset 0 of the reference revlog data: the version
# header passed to MixedIndex below.
header = struct.unpack_from(">I", revlogtesting.data_non_inlined)[0]
19
21
20
22
@unittest.skipIf(
    rustext is None,
    "rustext module revlog relies on is not available",
)
class RustRevlogIndexTest(revlogtesting.RevlogBasedTestBase):
    """Exercise the Rust MixedIndex wrapped around the C index."""

    def _indexes(self):
        # Parse the reference data with the C parser and wrap the
        # result in a Rust MixedIndex; both objects are returned so
        # tests can compare them.
        index = self.parseindex()
        rust_index = revlog.MixedIndex(
            index, revlogtesting.data_non_inlined, header
        )
        return index, rust_index

    def test_heads(self):
        index, rust_index = self._indexes()
        self.assertEqual(rust_index.headrevs(), index.headrevs())

    def test_get_cindex(self):
        # drop me once we no longer need the method for shortest node
        index, rust_index = self._indexes()
        self.assertTrue(index is rust_index.get_cindex())

    def test_len(self):
        index, rust_index = self._indexes()
        self.assertEqual(len(rust_index), len(index))

    def test_ancestors(self):
        index, rust_index = self._indexes()
        ancestors = LazyAncestors(rust_index, [3], 0, True)
        # we have two more references to the index:
        # - in its inner iterator for __contains__ and __bool__
        # - in the LazyAncestors instance itself (to spawn new iterators)
        self.assertTrue(2 in ancestors)
        self.assertTrue(bool(ancestors))
        self.assertEqual(list(ancestors), [3, 2, 1, 0])
        # listing a second time validates that fresh iterators are spawned
        self.assertEqual(list(ancestors), [3, 2, 1, 0])

        # bool() of an empty LazyAncestors must be False
        self.assertFalse(LazyAncestors(rust_index, [0], 0, False))
58
60
59
61
@unittest.skipIf(
    rustext is None,
    "rustext module revlog relies on is not available",
)
class RustRevlogNodeTreeClassTest(revlogtesting.RustRevlogBasedTestBase):
    """Exercise the standalone NodeTree class exposed from Rust."""

    def test_standalone_nodetree(self):
        index = self.parserustindex()
        tree = revlog.NodeTree(index)
        for rev in range(4):
            tree.insert(rev)

        # entry[7] is the binary node id of an index entry
        bin_nodes = [entry[7] for entry in index]
        hex_nodes = [hex(node) for node in bin_nodes]

        # full-node and 5-nybble-prefix lookups both resolve
        for rev, hex_node in enumerate(hex_nodes):
            self.assertEqual(tree.prefix_rev_lookup(hex_node), rev)
            self.assertEqual(tree.prefix_rev_lookup(hex_node[:5]), rev)

        # all 4 revisions in idx (standard data set) have different
        # first nybbles in their Node IDs,
        # hence `nt.shortest()` should return 1 for them, except when
        # the leading nybble is 0 (ambiguity with NULL_NODE)
        for rev, (bin_node, hex_node) in enumerate(
            zip(bin_nodes, hex_nodes)
        ):
            shortest = tree.shortest(bin_node)
            expected = 2 if hex_node[0] == ord('0') else 1
            self.assertEqual(shortest, expected)
            self.assertEqual(tree.prefix_rev_lookup(hex_node[:shortest]), rev)
89
90
if __name__ == '__main__':
    # Run through the quiet test runner used by the Mercurial test suite.
    import silenttestrunner

    silenttestrunner.main(__name__)
General Comments 0
You need to be logged in to leave comments. Login now