rust-index: cache the head nodeids python list...
Raphaël Gomès
r52156:5b4995b4 default
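For context, the change below memoizes the `head_node_ids()` result as a ready-made `PyList` on the index object, and drops it in `clearcaches`. The following is a minimal sketch of that memoization pattern only, using a hypothetical standalone `HeadsCache` type (not part of this commit, and not exactly how `inner_head_node_ids` decides when to rebuild); it assumes the `cpython` crate APIs already used in the file.

use std::cell::RefCell;

use cpython::{PyClone, PyList, PyObject, Python};

// Hypothetical holder, for illustration only; in the real diff the cache
// lives in the `Index` py_class as `head_node_ids_py_list`.
struct HeadsCache {
    head_node_ids_py_list: RefCell<Option<PyList>>,
}

impl HeadsCache {
    /// Return the cached list if present, otherwise build it once and cache it.
    fn head_node_ids(
        &self,
        py: Python,
        compute: impl FnOnce() -> Vec<PyObject>,
    ) -> PyList {
        if let Some(cached) = self.head_node_ids_py_list.borrow().as_ref() {
            // `clone_ref` only bumps the Python refcount, no list copy.
            return cached.clone_ref(py);
        }
        let fresh = PyList::new(py, &compute());
        *self.head_node_ids_py_list.borrow_mut() = Some(fresh.clone_ref(py));
        fresh
    }

    /// Mirror of `clearcaches`: drop the cached list so it gets rebuilt.
    fn clear_caches(&self) {
        self.head_node_ids_py_list.borrow_mut().take();
    }
}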
@@ -1,1189 +1,1218 @@
1 // revlog.rs
1 // revlog.rs
2 //
2 //
3 // Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
3 // Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::{
8 use crate::{
9 conversion::{rev_pyiter_collect, rev_pyiter_collect_or_else},
9 conversion::{rev_pyiter_collect, rev_pyiter_collect_or_else},
10 utils::{node_from_py_bytes, node_from_py_object},
10 utils::{node_from_py_bytes, node_from_py_object},
11 PyRevision,
11 PyRevision,
12 };
12 };
13 use cpython::{
13 use cpython::{
14 buffer::{Element, PyBuffer},
14 buffer::{Element, PyBuffer},
15 exc::{IndexError, ValueError},
15 exc::{IndexError, ValueError},
16 ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyInt, PyList,
16 ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyInt, PyList,
17 PyModule, PyObject, PyResult, PySet, PyString, PyTuple, Python,
17 PyModule, PyObject, PyResult, PySet, PyString, PyTuple, Python,
18 PythonObject, ToPyObject, UnsafePyLeaked,
18 PythonObject, ToPyObject, UnsafePyLeaked,
19 };
19 };
20 use hg::{
20 use hg::{
21 errors::HgError,
21 errors::HgError,
22 index::{
22 index::{
23 IndexHeader, Phase, RevisionDataParams, SnapshotsCache,
23 IndexHeader, Phase, RevisionDataParams, SnapshotsCache,
24 INDEX_ENTRY_SIZE,
24 INDEX_ENTRY_SIZE,
25 },
25 },
26 nodemap::{Block, NodeMapError, NodeTree as CoreNodeTree},
26 nodemap::{Block, NodeMapError, NodeTree as CoreNodeTree},
27 revlog::{nodemap::NodeMap, Graph, NodePrefix, RevlogError, RevlogIndex},
27 revlog::{nodemap::NodeMap, Graph, NodePrefix, RevlogError, RevlogIndex},
28 BaseRevision, Node, Revision, UncheckedRevision, NULL_REVISION,
28 BaseRevision, Node, Revision, UncheckedRevision, NULL_REVISION,
29 };
29 };
30 use std::{cell::RefCell, collections::HashMap};
30 use std::{cell::RefCell, collections::HashMap};
31 use vcsgraph::graph::Graph as VCSGraph;
31 use vcsgraph::graph::Graph as VCSGraph;
32
32
33 pub struct PySharedIndex {
33 pub struct PySharedIndex {
34 /// The underlying hg-core index
34 /// The underlying hg-core index
35 pub(crate) inner: &'static hg::index::Index,
35 pub(crate) inner: &'static hg::index::Index,
36 }
36 }
37
37
38 /// Return a Struct implementing the Graph trait
38 /// Return a Struct implementing the Graph trait
39 pub(crate) fn py_rust_index_to_graph(
39 pub(crate) fn py_rust_index_to_graph(
40 py: Python,
40 py: Python,
41 index: PyObject,
41 index: PyObject,
42 ) -> PyResult<UnsafePyLeaked<PySharedIndex>> {
42 ) -> PyResult<UnsafePyLeaked<PySharedIndex>> {
43 let midx = index.extract::<Index>(py)?;
43 let midx = index.extract::<Index>(py)?;
44 let leaked = midx.index(py).leak_immutable();
44 let leaked = midx.index(py).leak_immutable();
45 // Safety: we don't leak the "faked" reference out of the `UnsafePyLeaked`
45 // Safety: we don't leak the "faked" reference out of the `UnsafePyLeaked`
46 Ok(unsafe { leaked.map(py, |idx| PySharedIndex { inner: idx }) })
46 Ok(unsafe { leaked.map(py, |idx| PySharedIndex { inner: idx }) })
47 }
47 }
48
48
49 impl Clone for PySharedIndex {
49 impl Clone for PySharedIndex {
50 fn clone(&self) -> Self {
50 fn clone(&self) -> Self {
51 Self { inner: self.inner }
51 Self { inner: self.inner }
52 }
52 }
53 }
53 }
54
54
55 impl Graph for PySharedIndex {
55 impl Graph for PySharedIndex {
56 #[inline(always)]
56 #[inline(always)]
57 fn parents(&self, rev: Revision) -> Result<[Revision; 2], hg::GraphError> {
57 fn parents(&self, rev: Revision) -> Result<[Revision; 2], hg::GraphError> {
58 self.inner.parents(rev)
58 self.inner.parents(rev)
59 }
59 }
60 }
60 }
61
61
62 impl VCSGraph for PySharedIndex {
62 impl VCSGraph for PySharedIndex {
63 #[inline(always)]
63 #[inline(always)]
64 fn parents(
64 fn parents(
65 &self,
65 &self,
66 rev: BaseRevision,
66 rev: BaseRevision,
67 ) -> Result<vcsgraph::graph::Parents, vcsgraph::graph::GraphReadError>
67 ) -> Result<vcsgraph::graph::Parents, vcsgraph::graph::GraphReadError>
68 {
68 {
69 // FIXME This trait should be reworked to decide between Revision
69 // FIXME This trait should be reworked to decide between Revision
70 // and UncheckedRevision, get better errors names, etc.
70 // and UncheckedRevision, get better errors names, etc.
71 match Graph::parents(self, Revision(rev)) {
71 match Graph::parents(self, Revision(rev)) {
72 Ok(parents) => {
72 Ok(parents) => {
73 Ok(vcsgraph::graph::Parents([parents[0].0, parents[1].0]))
73 Ok(vcsgraph::graph::Parents([parents[0].0, parents[1].0]))
74 }
74 }
75 Err(hg::GraphError::ParentOutOfRange(rev)) => {
75 Err(hg::GraphError::ParentOutOfRange(rev)) => {
76 Err(vcsgraph::graph::GraphReadError::KeyedInvalidKey(rev.0))
76 Err(vcsgraph::graph::GraphReadError::KeyedInvalidKey(rev.0))
77 }
77 }
78 }
78 }
79 }
79 }
80 }
80 }
81
81
82 impl RevlogIndex for PySharedIndex {
82 impl RevlogIndex for PySharedIndex {
83 fn len(&self) -> usize {
83 fn len(&self) -> usize {
84 self.inner.len()
84 self.inner.len()
85 }
85 }
86 fn node(&self, rev: Revision) -> Option<&Node> {
86 fn node(&self, rev: Revision) -> Option<&Node> {
87 self.inner.node(rev)
87 self.inner.node(rev)
88 }
88 }
89 }
89 }
90
90
91 py_class!(pub class Index |py| {
91 py_class!(pub class Index |py| {
92 @shared data index: hg::index::Index;
92 @shared data index: hg::index::Index;
93 data nt: RefCell<Option<CoreNodeTree>>;
93 data nt: RefCell<Option<CoreNodeTree>>;
94 data docket: RefCell<Option<PyObject>>;
94 data docket: RefCell<Option<PyObject>>;
95 // Holds a reference to the mmap'ed persistent nodemap data
95 // Holds a reference to the mmap'ed persistent nodemap data
96 data nodemap_mmap: RefCell<Option<PyBuffer>>;
96 data nodemap_mmap: RefCell<Option<PyBuffer>>;
97 // Holds a reference to the mmap'ed persistent index data
97 // Holds a reference to the mmap'ed persistent index data
98 data index_mmap: RefCell<Option<PyBuffer>>;
98 data index_mmap: RefCell<Option<PyBuffer>>;
99 data head_revs_py_list: RefCell<Option<PyList>>;
100 data head_node_ids_py_list: RefCell<Option<PyList>>;
101
101 def __new__(
102 def __new__(
102 _cls,
103 _cls,
103 data: PyObject,
104 data: PyObject,
104 default_header: u32,
105 default_header: u32,
105 ) -> PyResult<Self> {
106 ) -> PyResult<Self> {
106 Self::new(py, data, default_header)
107 Self::new(py, data, default_header)
107 }
108 }
108
109
109 /// Compatibility layer used for Python consumers needing access to the C index
110 /// Compatibility layer used for Python consumers needing access to the C index
110 ///
111 ///
111 /// Only use case so far is `scmutil.shortesthexnodeidprefix`,
112 /// Only use case so far is `scmutil.shortesthexnodeidprefix`,
112 /// that may need to build a custom `nodetree`, based on a specified revset.
113 /// that may need to build a custom `nodetree`, based on a specified revset.
113 /// With a Rust implementation of the nodemap, we will be able to get rid of
114 /// With a Rust implementation of the nodemap, we will be able to get rid of
114 /// this, by exposing our own standalone nodemap class,
115 /// this, by exposing our own standalone nodemap class,
115 /// ready to accept `Index`.
116 /// ready to accept `Index`.
116 /* def get_cindex(&self) -> PyResult<PyObject> {
117 /* def get_cindex(&self) -> PyResult<PyObject> {
117 Ok(self.cindex(py).borrow().inner().clone_ref(py))
118 Ok(self.cindex(py).borrow().inner().clone_ref(py))
118 }
119 }
119 */
120 */
120 // Index API involving nodemap, as defined in mercurial/pure/parsers.py
121 // Index API involving nodemap, as defined in mercurial/pure/parsers.py
121
122
122 /// Return Revision if found, raises a bare `error.RevlogError`
123 /// Return Revision if found, raises a bare `error.RevlogError`
123 /// in case of ambiguity, same as C version does
124 /// in case of ambiguity, same as C version does
124 def get_rev(&self, node: PyBytes) -> PyResult<Option<PyRevision>> {
125 def get_rev(&self, node: PyBytes) -> PyResult<Option<PyRevision>> {
125 let opt = self.get_nodetree(py)?.borrow();
126 let opt = self.get_nodetree(py)?.borrow();
126 let nt = opt.as_ref().unwrap();
127 let nt = opt.as_ref().unwrap();
127 let ridx = &*self.index(py).borrow();
128 let ridx = &*self.index(py).borrow();
128 let node = node_from_py_bytes(py, &node)?;
129 let node = node_from_py_bytes(py, &node)?;
129 let rust_rev =
130 let rust_rev =
130 nt.find_bin(ridx, node.into()).map_err(|e| nodemap_error(py, e))?;
131 nt.find_bin(ridx, node.into()).map_err(|e| nodemap_error(py, e))?;
131 Ok(rust_rev.map(Into::into))
132 Ok(rust_rev.map(Into::into))
132
133
133 }
134 }
134
135
135 /// same as `get_rev()` but raises a bare `error.RevlogError` if node
136 /// same as `get_rev()` but raises a bare `error.RevlogError` if node
136 /// is not found.
137 /// is not found.
137 ///
138 ///
138 /// No need to repeat `node` in the exception, `mercurial/revlog.py`
139 /// No need to repeat `node` in the exception, `mercurial/revlog.py`
139 /// will catch and rewrap with it
140 /// will catch and rewrap with it
140 def rev(&self, node: PyBytes) -> PyResult<PyRevision> {
141 def rev(&self, node: PyBytes) -> PyResult<PyRevision> {
141 self.get_rev(py, node)?.ok_or_else(|| revlog_error(py))
142 self.get_rev(py, node)?.ok_or_else(|| revlog_error(py))
142 }
143 }
143
144
144 /// return True if the node exist in the index
145 /// return True if the node exist in the index
145 def has_node(&self, node: PyBytes) -> PyResult<bool> {
146 def has_node(&self, node: PyBytes) -> PyResult<bool> {
146 // TODO OPTIM we could avoid a needless conversion here,
147 // TODO OPTIM we could avoid a needless conversion here,
147 // to do when scaffolding for pure Rust switch is removed,
148 // to do when scaffolding for pure Rust switch is removed,
148 // as `get_rev()` currently does the necessary assertions
149 // as `get_rev()` currently does the necessary assertions
149 self.get_rev(py, node).map(|opt| opt.is_some())
150 self.get_rev(py, node).map(|opt| opt.is_some())
150 }
151 }
151
152
152 /// find length of shortest hex nodeid of a binary ID
153 /// find length of shortest hex nodeid of a binary ID
153 def shortest(&self, node: PyBytes) -> PyResult<usize> {
154 def shortest(&self, node: PyBytes) -> PyResult<usize> {
154 let opt = self.get_nodetree(py)?.borrow();
155 let opt = self.get_nodetree(py)?.borrow();
155 let nt = opt.as_ref().unwrap();
156 let nt = opt.as_ref().unwrap();
156 let idx = &*self.index(py).borrow();
157 let idx = &*self.index(py).borrow();
157 match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
158 match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
158 {
159 {
159 Ok(Some(l)) => Ok(l),
160 Ok(Some(l)) => Ok(l),
160 Ok(None) => Err(revlog_error(py)),
161 Ok(None) => Err(revlog_error(py)),
161 Err(e) => Err(nodemap_error(py, e)),
162 Err(e) => Err(nodemap_error(py, e)),
162 }
163 }
163 }
164 }
164
165
165 def partialmatch(&self, node: PyObject) -> PyResult<Option<PyBytes>> {
166 def partialmatch(&self, node: PyObject) -> PyResult<Option<PyBytes>> {
166 let opt = self.get_nodetree(py)?.borrow();
167 let opt = self.get_nodetree(py)?.borrow();
167 let nt = opt.as_ref().unwrap();
168 let nt = opt.as_ref().unwrap();
168 let idx = &*self.index(py).borrow();
169 let idx = &*self.index(py).borrow();
169
170
170 let node_as_string = if cfg!(feature = "python3-sys") {
171 let node_as_string = if cfg!(feature = "python3-sys") {
171 node.cast_as::<PyString>(py)?.to_string(py)?.to_string()
172 node.cast_as::<PyString>(py)?.to_string(py)?.to_string()
172 }
173 }
173 else {
174 else {
174 let node = node.extract::<PyBytes>(py)?;
175 let node = node.extract::<PyBytes>(py)?;
175 String::from_utf8_lossy(node.data(py)).to_string()
176 String::from_utf8_lossy(node.data(py)).to_string()
176 };
177 };
177
178
178 let prefix = NodePrefix::from_hex(&node_as_string)
179 let prefix = NodePrefix::from_hex(&node_as_string)
179 .map_err(|_| PyErr::new::<ValueError, _>(
180 .map_err(|_| PyErr::new::<ValueError, _>(
180 py, format!("Invalid node or prefix '{}'", node_as_string))
181 py, format!("Invalid node or prefix '{}'", node_as_string))
181 )?;
182 )?;
182
183
183 nt.find_bin(idx, prefix)
184 nt.find_bin(idx, prefix)
184 // TODO make an inner API returning the node directly
185 // TODO make an inner API returning the node directly
185 .map(|opt| opt.map(
186 .map(|opt| opt.map(
186 |rev| PyBytes::new(py, idx.node(rev).unwrap().as_bytes())))
187 |rev| PyBytes::new(py, idx.node(rev).unwrap().as_bytes())))
187 .map_err(|e| nodemap_error(py, e))
188 .map_err(|e| nodemap_error(py, e))
188
189
189 }
190 }
190
191
191 /// append an index entry
192 /// append an index entry
192 def append(&self, tup: PyTuple) -> PyResult<PyObject> {
193 def append(&self, tup: PyTuple) -> PyResult<PyObject> {
193 if tup.len(py) < 8 {
194 if tup.len(py) < 8 {
194 // this is better than the panic promised by tup.get_item()
195 // this is better than the panic promised by tup.get_item()
195 return Err(
196 return Err(
196 PyErr::new::<IndexError, _>(py, "tuple index out of range"))
197 PyErr::new::<IndexError, _>(py, "tuple index out of range"))
197 }
198 }
198 let node_bytes = tup.get_item(py, 7).extract(py)?;
199 let node_bytes = tup.get_item(py, 7).extract(py)?;
199 let node = node_from_py_object(py, &node_bytes)?;
200 let node = node_from_py_object(py, &node_bytes)?;
200
201
201 let rev = self.len(py)? as BaseRevision;
202 let rev = self.len(py)? as BaseRevision;
202
203
203 // This is ok since we will just add the revision to the index
204 // This is ok since we will just add the revision to the index
204 let rev = Revision(rev);
205 let rev = Revision(rev);
205 self.index(py)
206 self.index(py)
206 .borrow_mut()
207 .borrow_mut()
207 .append(py_tuple_to_revision_data_params(py, tup)?)
208 .append(py_tuple_to_revision_data_params(py, tup)?)
208 .unwrap();
209 .unwrap();
209 let idx = &*self.index(py).borrow();
210 let idx = &*self.index(py).borrow();
210 self.get_nodetree(py)?.borrow_mut().as_mut().unwrap()
211 self.get_nodetree(py)?.borrow_mut().as_mut().unwrap()
211 .insert(idx, &node, rev)
212 .insert(idx, &node, rev)
212 .map_err(|e| nodemap_error(py, e))?;
213 .map_err(|e| nodemap_error(py, e))?;
213 Ok(py.None())
214 Ok(py.None())
214 }
215 }
215
216
216 def __delitem__(&self, key: PyObject) -> PyResult<()> {
217 def __delitem__(&self, key: PyObject) -> PyResult<()> {
217 // __delitem__ is both for `del idx[r]` and `del idx[r1:r2]`
218 // __delitem__ is both for `del idx[r]` and `del idx[r1:r2]`
218 let start = if let Ok(rev) = key.extract(py) {
219 let start = if let Ok(rev) = key.extract(py) {
219 UncheckedRevision(rev)
220 UncheckedRevision(rev)
220 } else {
221 } else {
221 let start = key.getattr(py, "start")?;
222 let start = key.getattr(py, "start")?;
222 UncheckedRevision(start.extract(py)?)
223 UncheckedRevision(start.extract(py)?)
223 };
224 };
224 let start = self.index(py)
225 let start = self.index(py)
225 .borrow()
226 .borrow()
226 .check_revision(start)
227 .check_revision(start)
227 .ok_or_else(|| {
228 .ok_or_else(|| {
228 nodemap_error(py, NodeMapError::RevisionNotInIndex(start))
229 nodemap_error(py, NodeMapError::RevisionNotInIndex(start))
229 })?;
230 })?;
230 self.index(py).borrow_mut().remove(start).unwrap();
231 self.index(py).borrow_mut().remove(start).unwrap();
231 let mut opt = self.get_nodetree(py)?.borrow_mut();
232 let mut opt = self.get_nodetree(py)?.borrow_mut();
232 let nt = opt.as_mut().unwrap();
233 let nt = opt.as_mut().unwrap();
233 nt.invalidate_all();
234 nt.invalidate_all();
234 self.fill_nodemap(py, nt)?;
235 self.fill_nodemap(py, nt)?;
235 Ok(())
236 Ok(())
236 }
237 }
237
238
238 //
239 //
239 // Index methods previously reforwarded to C index (tp_methods)
240 // Index methods previously reforwarded to C index (tp_methods)
240 // Same ordering as in revlog.c
241 // Same ordering as in revlog.c
241 //
242 //
242
243
243 /// return the gca set of the given revs
244 /// return the gca set of the given revs
244 def ancestors(&self, *args, **_kw) -> PyResult<PyObject> {
245 def ancestors(&self, *args, **_kw) -> PyResult<PyObject> {
245 let rust_res = self.inner_ancestors(py, args)?;
246 let rust_res = self.inner_ancestors(py, args)?;
246 Ok(rust_res)
247 Ok(rust_res)
247 }
248 }
248
249
249 /// return the heads of the common ancestors of the given revs
250 /// return the heads of the common ancestors of the given revs
250 def commonancestorsheads(&self, *args, **_kw) -> PyResult<PyObject> {
251 def commonancestorsheads(&self, *args, **_kw) -> PyResult<PyObject> {
251 let rust_res = self.inner_commonancestorsheads(py, args)?;
252 let rust_res = self.inner_commonancestorsheads(py, args)?;
252 Ok(rust_res)
253 Ok(rust_res)
253 }
254 }
254
255
255 /// Clear the index caches and inner py_class data.
256 /// Clear the index caches and inner py_class data.
256 /// It is Python's responsibility to call `update_nodemap_data` again.
257 /// It is Python's responsibility to call `update_nodemap_data` again.
257 def clearcaches(&self) -> PyResult<PyObject> {
258 def clearcaches(&self) -> PyResult<PyObject> {
258 self.nt(py).borrow_mut().take();
259 self.nt(py).borrow_mut().take();
259 self.docket(py).borrow_mut().take();
260 self.docket(py).borrow_mut().take();
260 self.nodemap_mmap(py).borrow_mut().take();
261 self.nodemap_mmap(py).borrow_mut().take();
262 self.head_revs_py_list(py).borrow_mut().take();
263 self.head_node_ids_py_list(py).borrow_mut().take();
264 self.index(py).borrow().clear_caches();
263 Ok(py.None())
265 Ok(py.None())
264 }
266 }
265
267
266 /// return the raw binary string representing a revision
268 /// return the raw binary string representing a revision
267 def entry_binary(&self, *args, **_kw) -> PyResult<PyObject> {
269 def entry_binary(&self, *args, **_kw) -> PyResult<PyObject> {
268 let rindex = self.index(py).borrow();
270 let rindex = self.index(py).borrow();
269 let rev = UncheckedRevision(args.get_item(py, 0).extract(py)?);
271 let rev = UncheckedRevision(args.get_item(py, 0).extract(py)?);
270 let rust_bytes = rindex.check_revision(rev).and_then(
272 let rust_bytes = rindex.check_revision(rev).and_then(
271 |r| rindex.entry_binary(r))
273 |r| rindex.entry_binary(r))
272 .ok_or_else(|| rev_not_in_index(py, rev))?;
274 .ok_or_else(|| rev_not_in_index(py, rev))?;
273 let rust_res = PyBytes::new(py, rust_bytes).into_object();
275 let rust_res = PyBytes::new(py, rust_bytes).into_object();
274 Ok(rust_res)
276 Ok(rust_res)
275 }
277 }
276
278
277 /// return a binary packed version of the header
279 /// return a binary packed version of the header
278 def pack_header(&self, *args, **_kw) -> PyResult<PyObject> {
280 def pack_header(&self, *args, **_kw) -> PyResult<PyObject> {
279 let rindex = self.index(py).borrow();
281 let rindex = self.index(py).borrow();
280 let packed = rindex.pack_header(args.get_item(py, 0).extract(py)?);
282 let packed = rindex.pack_header(args.get_item(py, 0).extract(py)?);
281 let rust_res = PyBytes::new(py, &packed).into_object();
283 let rust_res = PyBytes::new(py, &packed).into_object();
282 Ok(rust_res)
284 Ok(rust_res)
283 }
285 }
284
286
285 /// compute phases
287 /// compute phases
286 def computephasesmapsets(&self, *args, **_kw) -> PyResult<PyObject> {
288 def computephasesmapsets(&self, *args, **_kw) -> PyResult<PyObject> {
287 let py_roots = args.get_item(py, 0).extract::<PyDict>(py)?;
289 let py_roots = args.get_item(py, 0).extract::<PyDict>(py)?;
288 let rust_res = self.inner_computephasesmapsets(py, py_roots)?;
290 let rust_res = self.inner_computephasesmapsets(py, py_roots)?;
289 Ok(rust_res)
291 Ok(rust_res)
290 }
292 }
291
293
292 /// reachableroots
294 /// reachableroots
293 def reachableroots2(&self, *args, **_kw) -> PyResult<PyObject> {
295 def reachableroots2(&self, *args, **_kw) -> PyResult<PyObject> {
294 let rust_res = self.inner_reachableroots2(
296 let rust_res = self.inner_reachableroots2(
295 py,
297 py,
296 UncheckedRevision(args.get_item(py, 0).extract(py)?),
298 UncheckedRevision(args.get_item(py, 0).extract(py)?),
297 args.get_item(py, 1),
299 args.get_item(py, 1),
298 args.get_item(py, 2),
300 args.get_item(py, 2),
299 args.get_item(py, 3).extract(py)?,
301 args.get_item(py, 3).extract(py)?,
300 )?;
302 )?;
301 Ok(rust_res)
303 Ok(rust_res)
302 }
304 }
303
305
304 /// get head revisions
306 /// get head revisions
305 def headrevs(&self) -> PyResult<PyObject> {
307 def headrevs(&self) -> PyResult<PyObject> {
306 let rust_res = self.inner_headrevs(py)?;
308 let rust_res = self.inner_headrevs(py)?;
307 Ok(rust_res)
309 Ok(rust_res)
308 }
310 }
309
311
310 /// get head nodeids
312 /// get head nodeids
311 def head_node_ids(&self) -> PyResult<PyObject> {
313 def head_node_ids(&self) -> PyResult<PyObject> {
312 let rust_res = self.inner_head_node_ids(py)?;
314 let rust_res = self.inner_head_node_ids(py)?;
313 Ok(rust_res)
315 Ok(rust_res)
314 }
316 }
315
317
316 /// get filtered head revisions
318 /// get filtered head revisions
317 def headrevsfiltered(&self, *args, **_kw) -> PyResult<PyObject> {
319 def headrevsfiltered(&self, *args, **_kw) -> PyResult<PyObject> {
318 let rust_res = self.inner_headrevsfiltered(py, &args.get_item(py, 0))?;
320 let rust_res = self.inner_headrevsfiltered(py, &args.get_item(py, 0))?;
319 Ok(rust_res)
321 Ok(rust_res)
320 }
322 }
321
323
322 /// True if the object is a snapshot
324 /// True if the object is a snapshot
323 def issnapshot(&self, *args, **_kw) -> PyResult<bool> {
325 def issnapshot(&self, *args, **_kw) -> PyResult<bool> {
324 let index = self.index(py).borrow();
326 let index = self.index(py).borrow();
325 let result = index
327 let result = index
326 .is_snapshot(UncheckedRevision(args.get_item(py, 0).extract(py)?))
328 .is_snapshot(UncheckedRevision(args.get_item(py, 0).extract(py)?))
327 .map_err(|e| {
329 .map_err(|e| {
328 PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
330 PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
329 })?;
331 })?;
330 Ok(result)
332 Ok(result)
331 }
333 }
332
334
333 /// Gather snapshot data in a cache dict
335 /// Gather snapshot data in a cache dict
334 def findsnapshots(&self, *args, **_kw) -> PyResult<PyObject> {
336 def findsnapshots(&self, *args, **_kw) -> PyResult<PyObject> {
335 let index = self.index(py).borrow();
337 let index = self.index(py).borrow();
336 let cache: PyDict = args.get_item(py, 0).extract(py)?;
338 let cache: PyDict = args.get_item(py, 0).extract(py)?;
337 // this methods operates by setting new values in the cache,
339 // this methods operates by setting new values in the cache,
338 // hence we will compare results by letting the C implementation
340 // hence we will compare results by letting the C implementation
339 // operate over a deepcopy of the cache, and finally compare both
341 // operate over a deepcopy of the cache, and finally compare both
340 // caches.
342 // caches.
341 let c_cache = PyDict::new(py);
343 let c_cache = PyDict::new(py);
342 for (k, v) in cache.items(py) {
344 for (k, v) in cache.items(py) {
343 c_cache.set_item(py, k, PySet::new(py, v)?)?;
345 c_cache.set_item(py, k, PySet::new(py, v)?)?;
344 }
346 }
345
347
346 let start_rev = UncheckedRevision(args.get_item(py, 1).extract(py)?);
348 let start_rev = UncheckedRevision(args.get_item(py, 1).extract(py)?);
347 let end_rev = UncheckedRevision(args.get_item(py, 2).extract(py)?);
349 let end_rev = UncheckedRevision(args.get_item(py, 2).extract(py)?);
348 let mut cache_wrapper = PySnapshotsCache{ py, dict: cache };
350 let mut cache_wrapper = PySnapshotsCache{ py, dict: cache };
349 index.find_snapshots(
351 index.find_snapshots(
350 start_rev,
352 start_rev,
351 end_rev,
353 end_rev,
352 &mut cache_wrapper,
354 &mut cache_wrapper,
353 ).map_err(|_| revlog_error(py))?;
355 ).map_err(|_| revlog_error(py))?;
354 Ok(py.None())
356 Ok(py.None())
355 }
357 }
356
358
357 /// determine revisions with deltas to reconstruct fulltext
359 /// determine revisions with deltas to reconstruct fulltext
358 def deltachain(&self, *args, **_kw) -> PyResult<PyObject> {
360 def deltachain(&self, *args, **_kw) -> PyResult<PyObject> {
359 let index = self.index(py).borrow();
361 let index = self.index(py).borrow();
360 let rev = args.get_item(py, 0).extract::<BaseRevision>(py)?.into();
362 let rev = args.get_item(py, 0).extract::<BaseRevision>(py)?.into();
361 let stop_rev =
363 let stop_rev =
362 args.get_item(py, 1).extract::<Option<BaseRevision>>(py)?;
364 args.get_item(py, 1).extract::<Option<BaseRevision>>(py)?;
363 let rev = index.check_revision(rev).ok_or_else(|| {
365 let rev = index.check_revision(rev).ok_or_else(|| {
364 nodemap_error(py, NodeMapError::RevisionNotInIndex(rev))
366 nodemap_error(py, NodeMapError::RevisionNotInIndex(rev))
365 })?;
367 })?;
366 let stop_rev = if let Some(stop_rev) = stop_rev {
368 let stop_rev = if let Some(stop_rev) = stop_rev {
367 let stop_rev = UncheckedRevision(stop_rev);
369 let stop_rev = UncheckedRevision(stop_rev);
368 Some(index.check_revision(stop_rev).ok_or_else(|| {
370 Some(index.check_revision(stop_rev).ok_or_else(|| {
369 nodemap_error(py, NodeMapError::RevisionNotInIndex(stop_rev))
371 nodemap_error(py, NodeMapError::RevisionNotInIndex(stop_rev))
370 })?)
372 })?)
371 } else {None};
373 } else {None};
372 let using_general_delta = args.get_item(py, 2)
374 let using_general_delta = args.get_item(py, 2)
373 .extract::<Option<u32>>(py)?
375 .extract::<Option<u32>>(py)?
374 .map(|i| i != 0);
376 .map(|i| i != 0);
375 let (chain, stopped) = index.delta_chain(
377 let (chain, stopped) = index.delta_chain(
376 rev, stop_rev, using_general_delta
378 rev, stop_rev, using_general_delta
377 ).map_err(|e| {
379 ).map_err(|e| {
378 PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
380 PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
379 })?;
381 })?;
380
382
381 let chain: Vec<_> = chain.into_iter().map(|r| r.0).collect();
383 let chain: Vec<_> = chain.into_iter().map(|r| r.0).collect();
382 Ok(
384 Ok(
383 PyTuple::new(
385 PyTuple::new(
384 py,
386 py,
385 &[
387 &[
386 chain.into_py_object(py).into_object(),
388 chain.into_py_object(py).into_object(),
387 stopped.into_py_object(py).into_object()
389 stopped.into_py_object(py).into_object()
388 ]
390 ]
389 ).into_object()
391 ).into_object()
390 )
392 )
391
393
392 }
394 }
393
395
394 /// slice planned chunk read to reach a density threshold
396 /// slice planned chunk read to reach a density threshold
395 def slicechunktodensity(&self, *args, **_kw) -> PyResult<PyObject> {
397 def slicechunktodensity(&self, *args, **_kw) -> PyResult<PyObject> {
396 let rust_res = self.inner_slicechunktodensity(
398 let rust_res = self.inner_slicechunktodensity(
397 py,
399 py,
398 args.get_item(py, 0),
400 args.get_item(py, 0),
399 args.get_item(py, 1).extract(py)?,
401 args.get_item(py, 1).extract(py)?,
400 args.get_item(py, 2).extract(py)?
402 args.get_item(py, 2).extract(py)?
401 )?;
403 )?;
402 Ok(rust_res)
404 Ok(rust_res)
403 }
405 }
404
406
405 // index_sequence_methods and index_mapping_methods.
407 // index_sequence_methods and index_mapping_methods.
406 //
408 //
407 // Since we call back through the high level Python API,
409 // Since we call back through the high level Python API,
408 // there's no point making a distinction between index_get
410 // there's no point making a distinction between index_get
409 // and index_getitem.
411 // and index_getitem.
410 // gracinet 2023: this above is no longer true for the pure Rust impl
412 // gracinet 2023: this above is no longer true for the pure Rust impl
411
413
412 def __len__(&self) -> PyResult<usize> {
414 def __len__(&self) -> PyResult<usize> {
413 self.len(py)
415 self.len(py)
414 }
416 }
415
417
416 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
418 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
417 let rust_res = self.inner_getitem(py, key.clone_ref(py))?;
419 let rust_res = self.inner_getitem(py, key.clone_ref(py))?;
418 Ok(rust_res)
420 Ok(rust_res)
419 }
421 }
420
422
421 def __contains__(&self, item: PyObject) -> PyResult<bool> {
423 def __contains__(&self, item: PyObject) -> PyResult<bool> {
422 // ObjectProtocol does not seem to provide contains(), so
424 // ObjectProtocol does not seem to provide contains(), so
423 // this is an equivalent implementation of the index_contains()
425 // this is an equivalent implementation of the index_contains()
424 // defined in revlog.c
426 // defined in revlog.c
425 match item.extract::<i32>(py) {
427 match item.extract::<i32>(py) {
426 Ok(rev) => {
428 Ok(rev) => {
427 Ok(rev >= -1 && rev < self.len(py)? as BaseRevision)
429 Ok(rev >= -1 && rev < self.len(py)? as BaseRevision)
428 }
430 }
429 Err(_) => {
431 Err(_) => {
430 let item_bytes: PyBytes = item.extract(py)?;
432 let item_bytes: PyBytes = item.extract(py)?;
431 let rust_res = self.has_node(py, item_bytes)?;
433 let rust_res = self.has_node(py, item_bytes)?;
432 Ok(rust_res)
434 Ok(rust_res)
433 }
435 }
434 }
436 }
435 }
437 }
436
438
437 def nodemap_data_all(&self) -> PyResult<PyBytes> {
439 def nodemap_data_all(&self) -> PyResult<PyBytes> {
438 self.inner_nodemap_data_all(py)
440 self.inner_nodemap_data_all(py)
439 }
441 }
440
442
441 def nodemap_data_incremental(&self) -> PyResult<PyObject> {
443 def nodemap_data_incremental(&self) -> PyResult<PyObject> {
442 self.inner_nodemap_data_incremental(py)
444 self.inner_nodemap_data_incremental(py)
443 }
445 }
444 def update_nodemap_data(
446 def update_nodemap_data(
445 &self,
447 &self,
446 docket: PyObject,
448 docket: PyObject,
447 nm_data: PyObject
449 nm_data: PyObject
448 ) -> PyResult<PyObject> {
450 ) -> PyResult<PyObject> {
449 self.inner_update_nodemap_data(py, docket, nm_data)
451 self.inner_update_nodemap_data(py, docket, nm_data)
450 }
452 }
451
453
452 @property
454 @property
453 def entry_size(&self) -> PyResult<PyInt> {
455 def entry_size(&self) -> PyResult<PyInt> {
454 let rust_res: PyInt = INDEX_ENTRY_SIZE.to_py_object(py);
456 let rust_res: PyInt = INDEX_ENTRY_SIZE.to_py_object(py);
455 Ok(rust_res)
457 Ok(rust_res)
456 }
458 }
457
459
458 @property
460 @property
459 def rust_ext_compat(&self) -> PyResult<PyInt> {
461 def rust_ext_compat(&self) -> PyResult<PyInt> {
460 // will be entirely removed when the Rust index yet useful to
462 // will be entirely removed when the Rust index yet useful to
461 // implement in Rust to detangle things when removing `self.cindex`
463 // implement in Rust to detangle things when removing `self.cindex`
462 let rust_res: PyInt = 1.to_py_object(py);
464 let rust_res: PyInt = 1.to_py_object(py);
463 Ok(rust_res)
465 Ok(rust_res)
464 }
466 }
465
467
466 @property
468 @property
467 def is_rust(&self) -> PyResult<PyBool> {
469 def is_rust(&self) -> PyResult<PyBool> {
468 Ok(false.to_py_object(py))
470 Ok(false.to_py_object(py))
469 }
471 }
470
472
471 });
473 });
472
474
473 /// Take a (potentially) mmap'ed buffer, and return the underlying Python
475 /// Take a (potentially) mmap'ed buffer, and return the underlying Python
474 /// buffer along with the Rust slice into said buffer. We need to keep the
476 /// buffer along with the Rust slice into said buffer. We need to keep the
475 /// Python buffer around, otherwise we'd get a dangling pointer once the buffer
477 /// Python buffer around, otherwise we'd get a dangling pointer once the buffer
476 /// is freed from Python's side.
478 /// is freed from Python's side.
477 ///
479 ///
478 /// # Safety
480 /// # Safety
479 ///
481 ///
480 /// The caller must make sure that the buffer is kept around for at least as
482 /// The caller must make sure that the buffer is kept around for at least as
481 /// long as the slice.
483 /// long as the slice.
482 #[deny(unsafe_op_in_unsafe_fn)]
484 #[deny(unsafe_op_in_unsafe_fn)]
483 unsafe fn mmap_keeparound(
485 unsafe fn mmap_keeparound(
484 py: Python,
486 py: Python,
485 data: PyObject,
487 data: PyObject,
486 ) -> PyResult<(
488 ) -> PyResult<(
487 PyBuffer,
489 PyBuffer,
488 Box<dyn std::ops::Deref<Target = [u8]> + Send + Sync + 'static>,
490 Box<dyn std::ops::Deref<Target = [u8]> + Send + Sync + 'static>,
489 )> {
491 )> {
490 let buf = PyBuffer::get(py, &data)?;
492 let buf = PyBuffer::get(py, &data)?;
491 let len = buf.item_count();
493 let len = buf.item_count();
492
494
493 // Build a slice from the mmap'ed buffer data
495 // Build a slice from the mmap'ed buffer data
494 let cbuf = buf.buf_ptr();
496 let cbuf = buf.buf_ptr();
495 let bytes = if std::mem::size_of::<u8>() == buf.item_size()
497 let bytes = if std::mem::size_of::<u8>() == buf.item_size()
496 && buf.is_c_contiguous()
498 && buf.is_c_contiguous()
497 && u8::is_compatible_format(buf.format())
499 && u8::is_compatible_format(buf.format())
498 {
500 {
499 unsafe { std::slice::from_raw_parts(cbuf as *const u8, len) }
501 unsafe { std::slice::from_raw_parts(cbuf as *const u8, len) }
500 } else {
502 } else {
501 return Err(PyErr::new::<ValueError, _>(
503 return Err(PyErr::new::<ValueError, _>(
502 py,
504 py,
503 "Nodemap data buffer has an invalid memory representation"
505 "Nodemap data buffer has an invalid memory representation"
504 .to_string(),
506 .to_string(),
505 ));
507 ));
506 };
508 };
507
509
508 Ok((buf, Box::new(bytes)))
510 Ok((buf, Box::new(bytes)))
509 }
511 }
510
512
511 fn py_tuple_to_revision_data_params(
513 fn py_tuple_to_revision_data_params(
512 py: Python,
514 py: Python,
513 tuple: PyTuple,
515 tuple: PyTuple,
514 ) -> PyResult<RevisionDataParams> {
516 ) -> PyResult<RevisionDataParams> {
515 if tuple.len(py) < 8 {
517 if tuple.len(py) < 8 {
516 // this is better than the panic promised by tup.get_item()
518 // this is better than the panic promised by tup.get_item()
517 return Err(PyErr::new::<IndexError, _>(
519 return Err(PyErr::new::<IndexError, _>(
518 py,
520 py,
519 "tuple index out of range",
521 "tuple index out of range",
520 ));
522 ));
521 }
523 }
522 let offset_or_flags: u64 = tuple.get_item(py, 0).extract(py)?;
524 let offset_or_flags: u64 = tuple.get_item(py, 0).extract(py)?;
523 let node_id = tuple
525 let node_id = tuple
524 .get_item(py, 7)
526 .get_item(py, 7)
525 .extract::<PyBytes>(py)?
527 .extract::<PyBytes>(py)?
526 .data(py)
528 .data(py)
527 .try_into()
529 .try_into()
528 .unwrap();
530 .unwrap();
529 let flags = (offset_or_flags & 0xFFFF) as u16;
531 let flags = (offset_or_flags & 0xFFFF) as u16;
530 let data_offset = offset_or_flags >> 16;
532 let data_offset = offset_or_flags >> 16;
531 Ok(RevisionDataParams {
533 Ok(RevisionDataParams {
532 flags,
534 flags,
533 data_offset,
535 data_offset,
534 data_compressed_length: tuple.get_item(py, 1).extract(py)?,
536 data_compressed_length: tuple.get_item(py, 1).extract(py)?,
535 data_uncompressed_length: tuple.get_item(py, 2).extract(py)?,
537 data_uncompressed_length: tuple.get_item(py, 2).extract(py)?,
536 data_delta_base: tuple.get_item(py, 3).extract(py)?,
538 data_delta_base: tuple.get_item(py, 3).extract(py)?,
537 link_rev: tuple.get_item(py, 4).extract(py)?,
539 link_rev: tuple.get_item(py, 4).extract(py)?,
538 parent_rev_1: tuple.get_item(py, 5).extract(py)?,
540 parent_rev_1: tuple.get_item(py, 5).extract(py)?,
539 parent_rev_2: tuple.get_item(py, 6).extract(py)?,
541 parent_rev_2: tuple.get_item(py, 6).extract(py)?,
540 node_id,
542 node_id,
541 ..Default::default()
543 ..Default::default()
542 })
544 })
543 }
545 }
544 fn revision_data_params_to_py_tuple(
546 fn revision_data_params_to_py_tuple(
545 py: Python,
547 py: Python,
546 params: RevisionDataParams,
548 params: RevisionDataParams,
547 ) -> PyTuple {
549 ) -> PyTuple {
548 PyTuple::new(
550 PyTuple::new(
549 py,
551 py,
550 &[
552 &[
551 params.data_offset.into_py_object(py).into_object(),
553 params.data_offset.into_py_object(py).into_object(),
552 params
554 params
553 .data_compressed_length
555 .data_compressed_length
554 .into_py_object(py)
556 .into_py_object(py)
555 .into_object(),
557 .into_object(),
556 params
558 params
557 .data_uncompressed_length
559 .data_uncompressed_length
558 .into_py_object(py)
560 .into_py_object(py)
559 .into_object(),
561 .into_object(),
560 params.data_delta_base.into_py_object(py).into_object(),
562 params.data_delta_base.into_py_object(py).into_object(),
561 params.link_rev.into_py_object(py).into_object(),
563 params.link_rev.into_py_object(py).into_object(),
562 params.parent_rev_1.into_py_object(py).into_object(),
564 params.parent_rev_1.into_py_object(py).into_object(),
563 params.parent_rev_2.into_py_object(py).into_object(),
565 params.parent_rev_2.into_py_object(py).into_object(),
564 PyBytes::new(py, &params.node_id)
566 PyBytes::new(py, &params.node_id)
565 .into_py_object(py)
567 .into_py_object(py)
566 .into_object(),
568 .into_object(),
567 params._sidedata_offset.into_py_object(py).into_object(),
569 params._sidedata_offset.into_py_object(py).into_object(),
568 params
570 params
569 ._sidedata_compressed_length
571 ._sidedata_compressed_length
570 .into_py_object(py)
572 .into_py_object(py)
571 .into_object(),
573 .into_object(),
572 params
574 params
573 .data_compression_mode
575 .data_compression_mode
574 .into_py_object(py)
576 .into_py_object(py)
575 .into_object(),
577 .into_object(),
576 params
578 params
577 ._sidedata_compression_mode
579 ._sidedata_compression_mode
578 .into_py_object(py)
580 .into_py_object(py)
579 .into_object(),
581 .into_object(),
580 params._rank.into_py_object(py).into_object(),
582 params._rank.into_py_object(py).into_object(),
581 ],
583 ],
582 )
584 )
583 }
585 }
584
586
585 struct PySnapshotsCache<'p> {
587 struct PySnapshotsCache<'p> {
586 py: Python<'p>,
588 py: Python<'p>,
587 dict: PyDict,
589 dict: PyDict,
588 }
590 }
589
591
590 impl<'p> SnapshotsCache for PySnapshotsCache<'p> {
592 impl<'p> SnapshotsCache for PySnapshotsCache<'p> {
591 fn insert_for(
593 fn insert_for(
592 &mut self,
594 &mut self,
593 rev: BaseRevision,
595 rev: BaseRevision,
594 value: BaseRevision,
596 value: BaseRevision,
595 ) -> Result<(), RevlogError> {
597 ) -> Result<(), RevlogError> {
596 let pyvalue = value.into_py_object(self.py).into_object();
598 let pyvalue = value.into_py_object(self.py).into_object();
597 match self.dict.get_item(self.py, rev) {
599 match self.dict.get_item(self.py, rev) {
598 Some(obj) => obj
600 Some(obj) => obj
599 .extract::<PySet>(self.py)
601 .extract::<PySet>(self.py)
600 .and_then(|set| set.add(self.py, pyvalue)),
602 .and_then(|set| set.add(self.py, pyvalue)),
601 None => PySet::new(self.py, vec![pyvalue])
603 None => PySet::new(self.py, vec![pyvalue])
602 .and_then(|set| self.dict.set_item(self.py, rev, set)),
604 .and_then(|set| self.dict.set_item(self.py, rev, set)),
603 }
605 }
604 .map_err(|_| {
606 .map_err(|_| {
605 RevlogError::Other(HgError::unsupported(
607 RevlogError::Other(HgError::unsupported(
606 "Error in Python caches handling",
608 "Error in Python caches handling",
607 ))
609 ))
608 })
610 })
609 }
611 }
610 }
612 }
611
613
612 impl Index {
614 impl Index {
613 fn new(py: Python, data: PyObject, header: u32) -> PyResult<Self> {
615 fn new(py: Python, data: PyObject, header: u32) -> PyResult<Self> {
614 // Safety: we keep the buffer around inside the class as `index_mmap`
616 // Safety: we keep the buffer around inside the class as `index_mmap`
615 let (buf, bytes) = unsafe { mmap_keeparound(py, data)? };
617 let (buf, bytes) = unsafe { mmap_keeparound(py, data)? };
616
618
617 Self::create_instance(
619 Self::create_instance(
618 py,
620 py,
619 hg::index::Index::new(
621 hg::index::Index::new(
620 bytes,
622 bytes,
621 IndexHeader::parse(&header.to_be_bytes())
623 IndexHeader::parse(&header.to_be_bytes())
622 .expect("default header is broken")
624 .expect("default header is broken")
623 .unwrap(),
625 .unwrap(),
624 )
626 )
625 .map_err(|e| {
627 .map_err(|e| {
626 revlog_error_with_msg(py, e.to_string().as_bytes())
628 revlog_error_with_msg(py, e.to_string().as_bytes())
627 })?,
629 })?,
630 RefCell::new(None),
631 RefCell::new(None),
632 RefCell::new(None),
633 RefCell::new(Some(buf)),
634 RefCell::new(None),
635 RefCell::new(None),
636 )
634 }
637 }
635
638
636 fn len(&self, py: Python) -> PyResult<usize> {
639 fn len(&self, py: Python) -> PyResult<usize> {
637 let rust_index_len = self.index(py).borrow().len();
640 let rust_index_len = self.index(py).borrow().len();
638 Ok(rust_index_len)
641 Ok(rust_index_len)
639 }
642 }
640
643
641 /// This is scaffolding at this point, but it could also become
644 /// This is scaffolding at this point, but it could also become
642 /// a way to start a persistent nodemap or perform a
645 /// a way to start a persistent nodemap or perform a
643 /// vacuum / repack operation
646 /// vacuum / repack operation
644 fn fill_nodemap(
647 fn fill_nodemap(
645 &self,
648 &self,
646 py: Python,
649 py: Python,
647 nt: &mut CoreNodeTree,
650 nt: &mut CoreNodeTree,
648 ) -> PyResult<PyObject> {
651 ) -> PyResult<PyObject> {
649 let index = self.index(py).borrow();
652 let index = self.index(py).borrow();
650 for r in 0..self.len(py)? {
653 for r in 0..self.len(py)? {
651 let rev = Revision(r as BaseRevision);
654 let rev = Revision(r as BaseRevision);
652 // in this case node() won't ever return None
655 // in this case node() won't ever return None
653 nt.insert(&*index, index.node(rev).unwrap(), rev)
656 nt.insert(&*index, index.node(rev).unwrap(), rev)
654 .map_err(|e| nodemap_error(py, e))?
657 .map_err(|e| nodemap_error(py, e))?
655 }
658 }
656 Ok(py.None())
659 Ok(py.None())
657 }
660 }
658
661
659 fn get_nodetree<'a>(
662 fn get_nodetree<'a>(
660 &'a self,
663 &'a self,
661 py: Python<'a>,
664 py: Python<'a>,
662 ) -> PyResult<&'a RefCell<Option<CoreNodeTree>>> {
665 ) -> PyResult<&'a RefCell<Option<CoreNodeTree>>> {
663 if self.nt(py).borrow().is_none() {
666 if self.nt(py).borrow().is_none() {
664 let readonly = Box::<Vec<_>>::default();
667 let readonly = Box::<Vec<_>>::default();
665 let mut nt = CoreNodeTree::load_bytes(readonly, 0);
668 let mut nt = CoreNodeTree::load_bytes(readonly, 0);
666 self.fill_nodemap(py, &mut nt)?;
669 self.fill_nodemap(py, &mut nt)?;
667 self.nt(py).borrow_mut().replace(nt);
670 self.nt(py).borrow_mut().replace(nt);
668 }
671 }
669 Ok(self.nt(py))
672 Ok(self.nt(py))
670 }
673 }
671
674
672 /// Returns the full nodemap bytes to be written as-is to disk
675 /// Returns the full nodemap bytes to be written as-is to disk
673 fn inner_nodemap_data_all(&self, py: Python) -> PyResult<PyBytes> {
676 fn inner_nodemap_data_all(&self, py: Python) -> PyResult<PyBytes> {
674 let nodemap = self.get_nodetree(py)?.borrow_mut().take().unwrap();
677 let nodemap = self.get_nodetree(py)?.borrow_mut().take().unwrap();
675 let (readonly, bytes) = nodemap.into_readonly_and_added_bytes();
678 let (readonly, bytes) = nodemap.into_readonly_and_added_bytes();
676
679
677 // If there's anything readonly, we need to build the data again from
680 // If there's anything readonly, we need to build the data again from
678 // scratch
681 // scratch
679 let bytes = if readonly.len() > 0 {
682 let bytes = if readonly.len() > 0 {
680 let mut nt = CoreNodeTree::load_bytes(Box::<Vec<_>>::default(), 0);
683 let mut nt = CoreNodeTree::load_bytes(Box::<Vec<_>>::default(), 0);
681 self.fill_nodemap(py, &mut nt)?;
684 self.fill_nodemap(py, &mut nt)?;
682
685
683 let (readonly, bytes) = nt.into_readonly_and_added_bytes();
686 let (readonly, bytes) = nt.into_readonly_and_added_bytes();
684 assert_eq!(readonly.len(), 0);
687 assert_eq!(readonly.len(), 0);
685
688
686 bytes
689 bytes
687 } else {
690 } else {
688 bytes
691 bytes
689 };
692 };
690
693
691 let bytes = PyBytes::new(py, &bytes);
694 let bytes = PyBytes::new(py, &bytes);
692 Ok(bytes)
695 Ok(bytes)
693 }
696 }
694
697
695 /// Returns the last saved docket along with the size of any changed data
698 /// Returns the last saved docket along with the size of any changed data
696 /// (in number of blocks), and said data as bytes.
699 /// (in number of blocks), and said data as bytes.
697 fn inner_nodemap_data_incremental(
700 fn inner_nodemap_data_incremental(
698 &self,
701 &self,
699 py: Python,
702 py: Python,
700 ) -> PyResult<PyObject> {
703 ) -> PyResult<PyObject> {
701 let docket = self.docket(py).borrow();
704 let docket = self.docket(py).borrow();
702 let docket = match docket.as_ref() {
705 let docket = match docket.as_ref() {
703 Some(d) => d,
706 Some(d) => d,
704 None => return Ok(py.None()),
707 None => return Ok(py.None()),
705 };
708 };
706
709
707 let node_tree = self.get_nodetree(py)?.borrow_mut().take().unwrap();
710 let node_tree = self.get_nodetree(py)?.borrow_mut().take().unwrap();
708 let masked_blocks = node_tree.masked_readonly_blocks();
711 let masked_blocks = node_tree.masked_readonly_blocks();
709 let (_, data) = node_tree.into_readonly_and_added_bytes();
712 let (_, data) = node_tree.into_readonly_and_added_bytes();
710 let changed = masked_blocks * std::mem::size_of::<Block>();
713 let changed = masked_blocks * std::mem::size_of::<Block>();
711
714
712 Ok((docket, changed, PyBytes::new(py, &data))
715 Ok((docket, changed, PyBytes::new(py, &data))
713 .to_py_object(py)
716 .to_py_object(py)
714 .into_object())
717 .into_object())
715 }
718 }
716
719
717 /// Update the nodemap from the new (mmaped) data.
720 /// Update the nodemap from the new (mmaped) data.
718 /// The docket is kept as a reference for later incremental calls.
721 /// The docket is kept as a reference for later incremental calls.
719 fn inner_update_nodemap_data(
722 fn inner_update_nodemap_data(
720 &self,
723 &self,
721 py: Python,
724 py: Python,
722 docket: PyObject,
725 docket: PyObject,
723 nm_data: PyObject,
726 nm_data: PyObject,
724 ) -> PyResult<PyObject> {
727 ) -> PyResult<PyObject> {
725 // Safety: we keep the buffer around inside the class as `nodemap_mmap`
728 // Safety: we keep the buffer around inside the class as `nodemap_mmap`
726 let (buf, bytes) = unsafe { mmap_keeparound(py, nm_data)? };
729 let (buf, bytes) = unsafe { mmap_keeparound(py, nm_data)? };
727 let len = buf.item_count();
730 let len = buf.item_count();
728 self.nodemap_mmap(py).borrow_mut().replace(buf);
731 self.nodemap_mmap(py).borrow_mut().replace(buf);
729
732
730 let mut nt = CoreNodeTree::load_bytes(bytes, len);
733 let mut nt = CoreNodeTree::load_bytes(bytes, len);
731
734
732 let data_tip = docket
735 let data_tip = docket
733 .getattr(py, "tip_rev")?
736 .getattr(py, "tip_rev")?
734 .extract::<BaseRevision>(py)?
737 .extract::<BaseRevision>(py)?
735 .into();
738 .into();
736 self.docket(py).borrow_mut().replace(docket.clone_ref(py));
739 self.docket(py).borrow_mut().replace(docket.clone_ref(py));
737 let idx = self.index(py).borrow();
740 let idx = self.index(py).borrow();
738 let data_tip = idx.check_revision(data_tip).ok_or_else(|| {
741 let data_tip = idx.check_revision(data_tip).ok_or_else(|| {
739 nodemap_error(py, NodeMapError::RevisionNotInIndex(data_tip))
742 nodemap_error(py, NodeMapError::RevisionNotInIndex(data_tip))
740 })?;
743 })?;
741 let current_tip = idx.len();
744 let current_tip = idx.len();
742
745
743 for r in (data_tip.0 + 1)..current_tip as BaseRevision {
746 for r in (data_tip.0 + 1)..current_tip as BaseRevision {
744 let rev = Revision(r);
747 let rev = Revision(r);
745 // in this case node() won't ever return None
748 // in this case node() won't ever return None
746 nt.insert(&*idx, idx.node(rev).unwrap(), rev)
749 nt.insert(&*idx, idx.node(rev).unwrap(), rev)
747 .map_err(|e| nodemap_error(py, e))?
750 .map_err(|e| nodemap_error(py, e))?
748 }
751 }
749
752
750 *self.nt(py).borrow_mut() = Some(nt);
753 *self.nt(py).borrow_mut() = Some(nt);
751
754
752 Ok(py.None())
755 Ok(py.None())
753 }
756 }
754
757
755 fn inner_getitem(&self, py: Python, key: PyObject) -> PyResult<PyObject> {
758 fn inner_getitem(&self, py: Python, key: PyObject) -> PyResult<PyObject> {
756 let idx = self.index(py).borrow();
759 let idx = self.index(py).borrow();
757 Ok(match key.extract::<BaseRevision>(py) {
760 Ok(match key.extract::<BaseRevision>(py) {
758 Ok(key_as_int) => {
761 Ok(key_as_int) => {
759 let entry_params = if key_as_int == NULL_REVISION.0 {
762 let entry_params = if key_as_int == NULL_REVISION.0 {
760 RevisionDataParams::default()
763 RevisionDataParams::default()
761 } else {
764 } else {
762 let rev = UncheckedRevision(key_as_int);
765 let rev = UncheckedRevision(key_as_int);
763 match idx.entry_as_params(rev) {
766 match idx.entry_as_params(rev) {
764 Some(e) => e,
767 Some(e) => e,
765 None => {
768 None => {
766 return Err(PyErr::new::<IndexError, _>(
769 return Err(PyErr::new::<IndexError, _>(
767 py,
770 py,
768 "revlog index out of range",
771 "revlog index out of range",
769 ));
772 ));
770 }
773 }
771 }
774 }
772 };
775 };
773 revision_data_params_to_py_tuple(py, entry_params)
776 revision_data_params_to_py_tuple(py, entry_params)
774 .into_object()
777 .into_object()
775 }
778 }
776 _ => self.get_rev(py, key.extract::<PyBytes>(py)?)?.map_or_else(
779 _ => self.get_rev(py, key.extract::<PyBytes>(py)?)?.map_or_else(
777 || py.None(),
780 || py.None(),
778 |py_rev| py_rev.into_py_object(py).into_object(),
781 |py_rev| py_rev.into_py_object(py).into_object(),
779 ),
782 ),
780 })
783 })
781 }
784 }
782
785
783 fn inner_head_node_ids(&self, py: Python) -> PyResult<PyObject> {
786 fn inner_head_node_ids(&self, py: Python) -> PyResult<PyObject> {
784 let index = &*self.index(py).borrow();
787 let index = &*self.index(py).borrow();
785
788
786 // We don't use the shortcut here, as it's actually slower to loop
789 // We don't use the shortcut here, as it's actually slower to loop
787 // through the cached `PyList` than to re-do the whole computation for
790 // through the cached `PyList` than to re-do the whole computation for
788 // large lists, which are the performance sensitive ones anyway.
791 // large lists, which are the performance sensitive ones anyway.
789 let head_revs = index.head_revs().map_err(|e| graph_error(py, e))?;
792 let head_revs = index.head_revs().map_err(|e| graph_error(py, e))?;
790 let res: Vec<_> = head_revs
793 let res: Vec<_> = head_revs
791 .iter()
794 .iter()
792 .map(|r| {
795 .map(|r| {
793 PyBytes::new(
796 PyBytes::new(
794 py,
797 py,
795 index
798 index
796 .node(*r)
799 .node(*r)
797 .expect("rev should have been in the index")
800 .expect("rev should have been in the index")
798 .as_bytes(),
801 .as_bytes(),
799 )
802 )
800 .into_object()
803 .into_object()
801 })
804 })
802 .collect();
805 .collect();
803
806
804 self.cache_new_heads_py_list(head_revs, py);
807 self.cache_new_heads_py_list(&head_revs, py);
808 self.cache_new_heads_node_ids_py_list(&head_revs, py);
809
810 Ok(PyList::new(py, &res).into_object())
807 }
811 }
808
812
809 fn inner_headrevs(&self, py: Python) -> PyResult<PyObject> {
813 fn inner_headrevs(&self, py: Python) -> PyResult<PyObject> {
810 let index = &*self.index(py).borrow();
814 let index = &*self.index(py).borrow();
815 if let Some(new_heads) =
816 index.head_revs_shortcut().map_err(|e| graph_error(py, e))?
817 {
814 self.cache_new_heads_py_list(new_heads, py);
818 self.cache_new_heads_py_list(&new_heads, py);
819 }
816
820
817 Ok(self
821 Ok(self
818 .head_revs_py_list(py)
822 .head_revs_py_list(py)
819 .borrow()
823 .borrow()
820 .as_ref()
824 .as_ref()
821 .expect("head revs should be cached")
825 .expect("head revs should be cached")
822 .clone_ref(py)
826 .clone_ref(py)
823 .into_object())
827 .into_object())
824 }
828 }
825
829
826 fn inner_headrevsfiltered(
830 fn inner_headrevsfiltered(
827 &self,
831 &self,
828 py: Python,
832 py: Python,
829 filtered_revs: &PyObject,
833 filtered_revs: &PyObject,
830 ) -> PyResult<PyObject> {
834 ) -> PyResult<PyObject> {
831 let index = &mut *self.index(py).borrow_mut();
835 let index = &mut *self.index(py).borrow_mut();
832 let filtered_revs = rev_pyiter_collect(py, filtered_revs, index)?;
836 let filtered_revs = rev_pyiter_collect(py, filtered_revs, index)?;
833
837
834 if let Some(new_heads) = index
838 if let Some(new_heads) = index
835 .head_revs_filtered(&filtered_revs, true)
839 .head_revs_filtered(&filtered_revs, true)
836 .map_err(|e| graph_error(py, e))?
840 .map_err(|e| graph_error(py, e))?
841 {
838 self.cache_new_heads_py_list(new_heads, py);
842 self.cache_new_heads_py_list(&new_heads, py);
843 }
840
844
841 Ok(self
845 Ok(self
842 .head_revs_py_list(py)
846 .head_revs_py_list(py)
843 .borrow()
847 .borrow()
844 .as_ref()
848 .as_ref()
845 .expect("head revs should be cached")
849 .expect("head revs should be cached")
846 .clone_ref(py)
850 .clone_ref(py)
847 .into_object())
851 .into_object())
848 }
852 }
849
853
854 fn cache_new_heads_node_ids_py_list(
855 &self,
856 new_heads: &[Revision],
857 py: Python<'_>,
858 ) -> PyList {
859 let index = self.index(py).borrow();
860 let as_vec: Vec<PyObject> = new_heads
861 .iter()
862 .map(|r| {
863 PyBytes::new(
864 py,
865 index
866 .node(*r)
867 .expect("rev should have been in the index")
868 .as_bytes(),
869 )
870 .into_object()
871 })
872 .collect();
873 let new_heads_py_list = PyList::new(py, &as_vec);
874 *self.head_node_ids_py_list(py).borrow_mut() =
875 Some(new_heads_py_list.clone_ref(py));
876 new_heads_py_list
877 }
878
879 fn cache_new_heads_py_list(
880 &self,
852 new_heads: Vec<Revision>,
881 new_heads: &[Revision],
882 py: Python<'_>,
883 ) -> PyList {
884 let as_vec: Vec<PyObject> = new_heads
885 .iter()
886 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
887 .collect();
888 let new_heads_py_list = PyList::new(py, &as_vec);
889 *self.head_revs_py_list(py).borrow_mut() =
890 Some(new_heads_py_list.clone_ref(py));
891 new_heads_py_list
892 }
864
893
    fn inner_ancestors(
        &self,
        py: Python,
        py_revs: &PyTuple,
    ) -> PyResult<PyObject> {
        let index = &*self.index(py).borrow();
        let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
        let as_vec: Vec<_> = index
            .ancestors(&revs)
            .map_err(|e| graph_error(py, e))?
            .iter()
            .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
            .collect();
        Ok(PyList::new(py, &as_vec).into_object())
    }

    fn inner_commonancestorsheads(
        &self,
        py: Python,
        py_revs: &PyTuple,
    ) -> PyResult<PyObject> {
        let index = &*self.index(py).borrow();
        let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
        let as_vec: Vec<_> = index
            .common_ancestor_heads(&revs)
            .map_err(|e| graph_error(py, e))?
            .iter()
            .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
            .collect();
        Ok(PyList::new(py, &as_vec).into_object())
    }

    fn inner_computephasesmapsets(
        &self,
        py: Python,
        py_roots: PyDict,
    ) -> PyResult<PyObject> {
        let index = &*self.index(py).borrow();
        let opt = self.get_nodetree(py)?.borrow();
        let nt = opt.as_ref().unwrap();
        let roots: Result<HashMap<Phase, Vec<Revision>>, PyErr> = py_roots
            .items_list(py)
            .iter(py)
            .map(|r| {
                let phase = r.get_item(py, 0)?;
                let nodes = r.get_item(py, 1)?;
                // Transform the nodes from Python to revs here since we
                // have access to the nodemap
                let revs: Result<_, _> = nodes
                    .iter(py)?
                    .map(|node| match node?.extract::<PyBytes>(py) {
                        Ok(py_bytes) => {
                            let node = node_from_py_bytes(py, &py_bytes)?;
                            nt.find_bin(index, node.into())
                                .map_err(|e| nodemap_error(py, e))?
                                .ok_or_else(|| revlog_error(py))
                        }
                        Err(e) => Err(e),
                    })
                    .collect();
                let phase = Phase::try_from(phase.extract::<usize>(py)?)
                    .map_err(|_| revlog_error(py));
                Ok((phase?, revs?))
            })
            .collect();
        let (len, phase_maps) = index
            .compute_phases_map_sets(roots?)
            .map_err(|e| graph_error(py, e))?;

        // Ugly hack, but temporary
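        // (These presumably map result indices to Mercurial's non-public
        // phase numbers: draft = 1, secret = 2, archived = 32, internal = 96.)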
        const IDX_TO_PHASE_NUM: [usize; 4] = [1, 2, 32, 96];
        let py_phase_maps = PyDict::new(py);
        for (idx, roots) in phase_maps.iter().enumerate() {
            let phase_num = IDX_TO_PHASE_NUM[idx].into_py_object(py);
            // OPTIM: too bad we have to collect here. At least we could
            // reuse the same Vec and allocate it with capacity of
            // max(len(phase_maps))
            let roots_vec: Vec<PyInt> = roots
                .iter()
                .map(|r| PyRevision::from(*r).into_py_object(py))
                .collect();
            py_phase_maps.set_item(
                py,
                phase_num,
                PySet::new(py, roots_vec)?,
            )?;
        }
        Ok(PyTuple::new(
            py,
            &[
                len.into_py_object(py).into_object(),
                py_phase_maps.into_object(),
            ],
        )
        .into_object())
    }

    fn inner_slicechunktodensity(
        &self,
        py: Python,
        revs: PyObject,
        target_density: f64,
        min_gap_size: usize,
    ) -> PyResult<PyObject> {
        let index = &*self.index(py).borrow();
        let revs: Vec<_> = rev_pyiter_collect(py, &revs, index)?;
        let as_nested_vec =
            index.slice_chunk_to_density(&revs, target_density, min_gap_size);
        let mut res = Vec::with_capacity(as_nested_vec.len());
        let mut py_chunk = Vec::new();
        for chunk in as_nested_vec {
            py_chunk.clear();
            py_chunk.reserve_exact(chunk.len());
            for rev in chunk {
                py_chunk.push(
                    PyRevision::from(rev).into_py_object(py).into_object(),
                );
            }
            res.push(PyList::new(py, &py_chunk).into_object());
        }
        // This is just to do the same as C, not sure why it does this
        if res.len() == 1 {
            Ok(PyTuple::new(py, &res).into_object())
        } else {
            Ok(PyList::new(py, &res).into_object())
        }
    }

    fn inner_reachableroots2(
        &self,
        py: Python,
        min_root: UncheckedRevision,
        heads: PyObject,
        roots: PyObject,
        include_path: bool,
    ) -> PyResult<PyObject> {
        let index = &*self.index(py).borrow();
        let heads = rev_pyiter_collect_or_else(py, &heads, index, |_rev| {
            PyErr::new::<IndexError, _>(py, "head out of range")
        })?;
        let roots: Result<_, _> = roots
            .iter(py)?
            .map(|r| {
                r.and_then(|o| match o.extract::<PyRevision>(py) {
                    Ok(r) => Ok(UncheckedRevision(r.0)),
                    Err(e) => Err(e),
                })
            })
            .collect();
        let as_set = index
            .reachable_roots(min_root, heads, roots?, include_path)
            .map_err(|e| graph_error(py, e))?;
        let as_vec: Vec<PyObject> = as_set
            .iter()
            .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
            .collect();
        Ok(PyList::new(py, &as_vec).into_object())
    }
}

py_class!(pub class NodeTree |py| {
    data nt: RefCell<CoreNodeTree>;
    data index: RefCell<UnsafePyLeaked<PySharedIndex>>;

    def __new__(_cls, index: PyObject) -> PyResult<NodeTree> {
        let index = py_rust_index_to_graph(py, index)?;
        let nt = CoreNodeTree::default(); // in-RAM, fully mutable
        Self::create_instance(py, RefCell::new(nt), RefCell::new(index))
    }

    /// Tell whether the NodeTree is still valid
    ///
    /// In case of mutation of the index, the given results are not
    /// guaranteed to be correct, and in fact, the methods borrowing
    /// the inner index would fail because of `PySharedRef` poisoning
    /// (generation-based guard), same as iterating on a `dict` that has
    /// meanwhile been mutated.
    def is_invalidated(&self) -> PyResult<bool> {
        let leaked = self.index(py).borrow();
        // Safety: we don't leak the "faked" reference out of `UnsafePyLeaked`
        let result = unsafe { leaked.try_borrow(py) };
        // two cases for result to be an error:
        // - the index has previously been mutably borrowed
        // - there is currently a mutable borrow
        // in both cases this means that previous results related to
        // the index can no longer be considered valid.
        Ok(result.is_err())
    }
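
    // Illustrative (hypothetical) Python-side guard built on the check above:
    //
    //     if nt.is_invalidated():
    //         nt = NodeTree(index)  # rebuild rather than trust stale lookups
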
    def insert(&self, rev: PyRevision) -> PyResult<PyObject> {
        let leaked = self.index(py).borrow();
        // Safety: we don't leak the "faked" reference out of `UnsafePyLeaked`
        let index = &*unsafe { leaked.try_borrow(py)? };

        let rev = UncheckedRevision(rev.0);
        let rev = index
            .check_revision(rev)
            .ok_or_else(|| rev_not_in_index(py, rev))?;
        if rev == NULL_REVISION {
            return Err(rev_not_in_index(py, rev.into()))
        }

        let entry = index.inner.get_entry(rev).unwrap();
        let mut nt = self.nt(py).borrow_mut();
        nt.insert(index, entry.hash(), rev).map_err(|e| nodemap_error(py, e))?;

        Ok(py.None())
    }

    /// Look up by node hex prefix in the NodeTree, returning the revision
    /// number.
    ///
    /// This is not part of the classical NodeTree API, but is good enough
    /// for unit testing, as in `test-rust-revlog.py`.
    def prefix_rev_lookup(
        &self,
        node_prefix: PyBytes
    ) -> PyResult<Option<PyRevision>> {
        let prefix = NodePrefix::from_hex(node_prefix.data(py))
            .map_err(|_| PyErr::new::<ValueError, _>(
                py,
                format!("Invalid node or prefix {:?}",
                        node_prefix.as_object()))
            )?;

        let nt = self.nt(py).borrow();
        let leaked = self.index(py).borrow();
        // Safety: we don't leak the "faked" reference out of `UnsafePyLeaked`
        let index = &*unsafe { leaked.try_borrow(py)? };

        Ok(nt.find_bin(index, prefix)
            .map_err(|e| nodemap_error(py, e))?
            .map(|r| r.into())
        )
    }
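
    // Behaviour implied by the lookup above together with `nodemap_error`
    // defined further down: an unambiguous prefix yields `Some(rev)`, an
    // unknown prefix yields `None`, and an ambiguous prefix is reported as a
    // `RevlogError` (via `NodeMapError::MultipleResults`).
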
    def shortest(&self, node: PyBytes) -> PyResult<usize> {
        let nt = self.nt(py).borrow();
        let leaked = self.index(py).borrow();
        // Safety: we don't leak the "faked" reference out of `UnsafePyLeaked`
        let idx = &*unsafe { leaked.try_borrow(py)? };
        match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
        {
            Ok(Some(l)) => Ok(l),
            Ok(None) => Err(revlog_error(py)),
            Err(e) => Err(nodemap_error(py, e)),
        }
    }
});
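
// A rough Python-side usage sketch for the `NodeTree` class above (method
// names are those defined in this file; obtaining a compatible Rust index
// object is assumed):
//
//     nt = NodeTree(rust_index)
//     nt.insert(rev)                     # index the node of revision `rev`
//     nt.prefix_rev_lookup(b"deadbeef")  # -> revision number or None
//     nt.shortest(full_binary_node)      # -> length of shortest unique prefix
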
fn revlog_error(py: Python) -> PyErr {
    match py
        .import("mercurial.error")
        .and_then(|m| m.get(py, "RevlogError"))
    {
        Err(e) => e,
        Ok(cls) => PyErr::from_instance(
            py,
            cls.call(py, (py.None(),), None).ok().into_py_object(py),
        ),
    }
}

fn revlog_error_with_msg(py: Python, msg: &[u8]) -> PyErr {
    match py
        .import("mercurial.error")
        .and_then(|m| m.get(py, "RevlogError"))
    {
        Err(e) => e,
        Ok(cls) => PyErr::from_instance(
            py,
            cls.call(py, (PyBytes::new(py, msg),), None)
                .ok()
                .into_py_object(py),
        ),
    }
}

fn graph_error(py: Python, _err: hg::GraphError) -> PyErr {
    // ParentOutOfRange is currently the only alternative
    // in `hg::GraphError`. The C index always raises this simple ValueError.
    PyErr::new::<ValueError, _>(py, "parent out of range")
}

fn nodemap_rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
    PyErr::new::<ValueError, _>(
        py,
        format!(
            "Inconsistency: Revision {} found in nodemap \
             is not in revlog index",
            rev
        ),
    )
}

fn rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
    PyErr::new::<ValueError, _>(
        py,
        format!("revlog index out of range: {}", rev),
    )
}

/// Standard treatment of NodeMapError
fn nodemap_error(py: Python, err: NodeMapError) -> PyErr {
    match err {
        NodeMapError::MultipleResults => revlog_error(py),
        NodeMapError::RevisionNotInIndex(r) => nodemap_rev_not_in_index(py, r),
    }
}

/// Create the module, with __package__ given from parent
pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
    let dotted_name = &format!("{}.revlog", package);
    let m = PyModule::new(py, dotted_name)?;
    m.add(py, "__package__", package)?;
    m.add(py, "__doc__", "RevLog - Rust implementations")?;

    m.add_class::<Index>(py)?;
    m.add_class::<NodeTree>(py)?;

    let sys = PyModule::import(py, "sys")?;
    let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
    sys_modules.set_item(py, dotted_name, &m)?;

    Ok(m)
}
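
// A minimal sketch of how a parent package might wire this module in
// (the surrounding setup code is assumed; only `init_module` above is given):
//
//     // in the parent's module initialization:
//     // let revlog = revlog::init_module(py, &dotted_name)?;
//     // parent_module.add(py, "revlog", revlog)?;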