##// END OF EJS Templates
rust-index: don't use a mutable borrow to compute filtered heads...
marmoute -
r52360:68ed56ba stable
parent child Browse files
Show More
@@ -1,1239 +1,1239 b''
1 1 // revlog.rs
2 2 //
3 3 // Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
4 4 //
5 5 // This software may be used and distributed according to the terms of the
6 6 // GNU General Public License version 2 or any later version.
7 7
8 8 use crate::{
9 9 conversion::{rev_pyiter_collect, rev_pyiter_collect_or_else},
10 10 utils::{node_from_py_bytes, node_from_py_object},
11 11 PyRevision,
12 12 };
13 13 use cpython::{
14 14 buffer::{Element, PyBuffer},
15 15 exc::{IndexError, ValueError},
16 16 ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyInt, PyList,
17 17 PyModule, PyObject, PyResult, PySet, PyString, PyTuple, Python,
18 18 PythonObject, ToPyObject, UnsafePyLeaked,
19 19 };
20 20 use hg::{
21 21 errors::HgError,
22 22 index::{
23 23 IndexHeader, Phase, RevisionDataParams, SnapshotsCache,
24 24 INDEX_ENTRY_SIZE,
25 25 },
26 26 nodemap::{Block, NodeMapError, NodeTree as CoreNodeTree},
27 27 revlog::{nodemap::NodeMap, Graph, NodePrefix, RevlogError, RevlogIndex},
28 28 BaseRevision, Node, Revision, UncheckedRevision, NULL_REVISION,
29 29 };
30 30 use std::{cell::RefCell, collections::HashMap};
31 31 use vcsgraph::graph::Graph as VCSGraph;
32 32
/// A shared reference to the core index, for use in leaked contexts.
///
/// NOTE(review): the `'static` lifetime is not literal — instances are
/// created inside an `UnsafePyLeaked` (see `py_rust_index_to_graph`),
/// which is what actually keeps the underlying index alive.
pub struct PySharedIndex {
    /// The underlying hg-core index
    pub(crate) inner: &'static hg::index::Index,
}
37 37
/// Return a Struct implementing the Graph trait
///
/// `index` must be an instance of the `Index` py_class defined below;
/// extraction fails with a Python exception otherwise.
pub(crate) fn py_rust_index_to_graph(
    py: Python,
    index: PyObject,
) -> PyResult<UnsafePyLeaked<PySharedIndex>> {
    let midx = index.extract::<Index>(py)?;
    // Leak an immutable reference to the shared core index so it can be
    // carried across the Python boundary without a lifetime parameter.
    let leaked = midx.index(py).leak_immutable();
    // Safety: we don't leak the "faked" reference out of the `UnsafePyLeaked`
    Ok(unsafe { leaked.map(py, |idx| PySharedIndex { inner: idx }) })
}
48 48
49 49 impl Clone for PySharedIndex {
50 50 fn clone(&self) -> Self {
51 51 Self { inner: self.inner }
52 52 }
53 53 }
54 54
impl Graph for PySharedIndex {
    /// Delegate parent lookup to the underlying hg-core index.
    #[inline(always)]
    fn parents(&self, rev: Revision) -> Result<[Revision; 2], hg::GraphError> {
        self.inner.parents(rev)
    }
}
61 61
62 62 impl VCSGraph for PySharedIndex {
63 63 #[inline(always)]
64 64 fn parents(
65 65 &self,
66 66 rev: BaseRevision,
67 67 ) -> Result<vcsgraph::graph::Parents, vcsgraph::graph::GraphReadError>
68 68 {
69 69 // FIXME This trait should be reworked to decide between Revision
70 70 // and UncheckedRevision, get better errors names, etc.
71 71 match Graph::parents(self, Revision(rev)) {
72 72 Ok(parents) => {
73 73 Ok(vcsgraph::graph::Parents([parents[0].0, parents[1].0]))
74 74 }
75 75 Err(hg::GraphError::ParentOutOfRange(rev)) => {
76 76 Err(vcsgraph::graph::GraphReadError::KeyedInvalidKey(rev.0))
77 77 }
78 78 }
79 79 }
80 80 }
81 81
82 82 impl RevlogIndex for PySharedIndex {
83 83 fn len(&self) -> usize {
84 84 self.inner.len()
85 85 }
86 86 fn node(&self, rev: Revision) -> Option<&Node> {
87 87 self.inner.node(rev)
88 88 }
89 89 }
90 90
py_class!(pub class Index |py| {
    @shared data index: hg::index::Index;
    data nt: RefCell<Option<CoreNodeTree>>;
    data docket: RefCell<Option<PyObject>>;
    // Holds a reference to the mmap'ed persistent nodemap data
    data nodemap_mmap: RefCell<Option<PyBuffer>>;
    // Holds a reference to the mmap'ed persistent index data
    data index_mmap: RefCell<Option<PyBuffer>>;
    // Cached Python list of the current head revisions
    data head_revs_py_list: RefCell<Option<PyList>>;
    // Cached Python list of the current head node ids
    data head_node_ids_py_list: RefCell<Option<PyList>>;

    /// Build an `Index` from an index buffer (usually mmap'ed) and a
    /// default header value.
    def __new__(
        _cls,
        data: PyObject,
        default_header: u32,
    ) -> PyResult<Self> {
        Self::new(py, data, default_header)
    }

    /// Compatibility layer used for Python consumers needing access to the C index
    ///
    /// Only use case so far is `scmutil.shortesthexnodeidprefix`,
    /// that may need to build a custom `nodetree`, based on a specified revset.
    /// With a Rust implementation of the nodemap, we will be able to get rid of
    /// this, by exposing our own standalone nodemap class,
    /// ready to accept `Index`.
    /* def get_cindex(&self) -> PyResult<PyObject> {
        Ok(self.cindex(py).borrow().inner().clone_ref(py))
    }
    */
    // Index API involving nodemap, as defined in mercurial/pure/parsers.py

    /// Return Revision if found, raises a bare `error.RevlogError`
    /// in case of ambiguity, same as C version does
    def get_rev(&self, node: PyBytes) -> PyResult<Option<PyRevision>> {
        let opt = self.get_nodetree(py)?.borrow();
        let nt = opt.as_ref().unwrap();
        let ridx = &*self.index(py).borrow();
        let node = node_from_py_bytes(py, &node)?;
        let rust_rev =
            nt.find_bin(ridx, node.into()).map_err(|e| nodemap_error(py, e))?;
        Ok(rust_rev.map(Into::into))

    }

    /// same as `get_rev()` but raises a bare `error.RevlogError` if node
    /// is not found.
    ///
    /// No need to repeat `node` in the exception, `mercurial/revlog.py`
    /// will catch and rewrap with it
    def rev(&self, node: PyBytes) -> PyResult<PyRevision> {
        self.get_rev(py, node)?.ok_or_else(|| revlog_error(py))
    }

    /// return True if the node exist in the index
    def has_node(&self, node: PyBytes) -> PyResult<bool> {
        // TODO OPTIM we could avoid a needless conversion here,
        // to do when scaffolding for pure Rust switch is removed,
        // as `get_rev()` currently does the necessary assertions
        self.get_rev(py, node).map(|opt| opt.is_some())
    }

    /// find length of shortest hex nodeid of a binary ID
    def shortest(&self, node: PyBytes) -> PyResult<usize> {
        let opt = self.get_nodetree(py)?.borrow();
        let nt = opt.as_ref().unwrap();
        let idx = &*self.index(py).borrow();
        match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
        {
            Ok(Some(l)) => Ok(l),
            Ok(None) => Err(revlog_error(py)),
            Err(e) => Err(nodemap_error(py, e)),
        }
    }

    /// Return the full binary node id matching the given hex prefix,
    /// `None` if there is no match.
    def partialmatch(&self, node: PyObject) -> PyResult<Option<PyBytes>> {
        let opt = self.get_nodetree(py)?.borrow();
        let nt = opt.as_ref().unwrap();
        let idx = &*self.index(py).borrow();

        // Python 3 hands us a `str`; the legacy path gets raw bytes.
        let node_as_string = if cfg!(feature = "python3-sys") {
            node.cast_as::<PyString>(py)?.to_string(py)?.to_string()
        }
        else {
            let node = node.extract::<PyBytes>(py)?;
            String::from_utf8_lossy(node.data(py)).to_string()
        };

        let prefix = NodePrefix::from_hex(&node_as_string)
            .map_err(|_| PyErr::new::<ValueError, _>(
                py, format!("Invalid node or prefix '{}'", node_as_string))
            )?;

        nt.find_bin(idx, prefix)
            // TODO make an inner API returning the node directly
            .map(|opt| opt.map(
                |rev| PyBytes::new(py, idx.node(rev).unwrap().as_bytes())))
            .map_err(|e| nodemap_error(py, e))

    }

    /// append an index entry
    def append(&self, tup: PyTuple) -> PyResult<PyObject> {
        if tup.len(py) < 8 {
            // this is better than the panic promised by tup.get_item()
            return Err(
                PyErr::new::<IndexError, _>(py, "tuple index out of range"))
        }
        let node_bytes = tup.get_item(py, 7).extract(py)?;
        let node = node_from_py_object(py, &node_bytes)?;

        let rev = self.len(py)? as BaseRevision;

        // This is ok since we will just add the revision to the index
        let rev = Revision(rev);
        self.index(py)
            .borrow_mut()
            .append(py_tuple_to_revision_data_params(py, tup)?)
            .unwrap();
        // Keep the nodemap in sync with the entry we just appended.
        let idx = &*self.index(py).borrow();
        self.get_nodetree(py)?.borrow_mut().as_mut().unwrap()
            .insert(idx, &node, rev)
            .map_err(|e| nodemap_error(py, e))?;
        Ok(py.None())
    }

    def __delitem__(&self, key: PyObject) -> PyResult<()> {
        // __delitem__ is both for `del idx[r]` and `del idx[r1:r2]`
        let start = if let Ok(rev) = key.extract(py) {
            UncheckedRevision(rev)
        } else {
            let start = key.getattr(py, "start")?;
            UncheckedRevision(start.extract(py)?)
        };
        let start = self.index(py)
            .borrow()
            .check_revision(start)
            .ok_or_else(|| {
                nodemap_error(py, NodeMapError::RevisionNotInIndex(start))
            })?;
        self.index(py).borrow_mut().remove(start).unwrap();
        // The nodetree is invalidated wholesale and rebuilt from the
        // truncated index.
        let mut opt = self.get_nodetree(py)?.borrow_mut();
        let nt = opt.as_mut().unwrap();
        nt.invalidate_all();
        self.fill_nodemap(py, nt)?;
        Ok(())
    }

    //
    // Index methods previously reforwarded to C index (tp_methods)
    // Same ordering as in revlog.c
    //

    /// return the gca set of the given revs
    def ancestors(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_ancestors(py, args)?;
        Ok(rust_res)
    }

    /// return the heads of the common ancestors of the given revs
    def commonancestorsheads(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_commonancestorsheads(py, args)?;
        Ok(rust_res)
    }

    /// Clear the index caches and inner py_class data.
    /// It is Python's responsibility to call `update_nodemap_data` again.
    def clearcaches(&self) -> PyResult<PyObject> {
        self.nt(py).borrow_mut().take();
        self.docket(py).borrow_mut().take();
        self.nodemap_mmap(py).borrow_mut().take();
        self.head_revs_py_list(py).borrow_mut().take();
        self.head_node_ids_py_list(py).borrow_mut().take();
        self.index(py).borrow().clear_caches();
        Ok(py.None())
    }

    /// return the raw binary string representing a revision
    def entry_binary(&self, *args, **_kw) -> PyResult<PyObject> {
        let rindex = self.index(py).borrow();
        let rev = UncheckedRevision(args.get_item(py, 0).extract(py)?);
        let rust_bytes = rindex.check_revision(rev).and_then(
            |r| rindex.entry_binary(r))
            .ok_or_else(|| rev_not_in_index(py, rev))?;
        let rust_res = PyBytes::new(py, rust_bytes).into_object();
        Ok(rust_res)
    }

    /// return a binary packed version of the header
    def pack_header(&self, *args, **_kw) -> PyResult<PyObject> {
        let rindex = self.index(py).borrow();
        let packed = rindex.pack_header(args.get_item(py, 0).extract(py)?);
        let rust_res = PyBytes::new(py, &packed).into_object();
        Ok(rust_res)
    }

    /// compute phases
    def computephasesmapsets(&self, *args, **_kw) -> PyResult<PyObject> {
        let py_roots = args.get_item(py, 0).extract::<PyDict>(py)?;
        let rust_res = self.inner_computephasesmapsets(py, py_roots)?;
        Ok(rust_res)
    }

    /// reachableroots
    def reachableroots2(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_reachableroots2(
            py,
            UncheckedRevision(args.get_item(py, 0).extract(py)?),
            args.get_item(py, 1),
            args.get_item(py, 2),
            args.get_item(py, 3).extract(py)?,
        )?;
        Ok(rust_res)
    }

    /// get head revisions
    def headrevs(&self) -> PyResult<PyObject> {
        let rust_res = self.inner_headrevs(py)?;
        Ok(rust_res)
    }

    /// get head nodeids
    def head_node_ids(&self) -> PyResult<PyObject> {
        let rust_res = self.inner_head_node_ids(py)?;
        Ok(rust_res)
    }

    /// get diff in head revisions
    def headrevsdiff(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_headrevsdiff(
            py,
            &args.get_item(py, 0),
            &args.get_item(py, 1))?;
        Ok(rust_res)
    }

    /// get filtered head revisions
    def headrevsfiltered(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_headrevsfiltered(py, &args.get_item(py, 0))?;
        Ok(rust_res)
    }

    /// True if the object is a snapshot
    def issnapshot(&self, *args, **_kw) -> PyResult<bool> {
        let index = self.index(py).borrow();
        let result = index
            .is_snapshot(UncheckedRevision(args.get_item(py, 0).extract(py)?))
            .map_err(|e| {
                PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
            })?;
        Ok(result)
    }

    /// Gather snapshot data in a cache dict
    def findsnapshots(&self, *args, **_kw) -> PyResult<PyObject> {
        let index = self.index(py).borrow();
        let cache: PyDict = args.get_item(py, 0).extract(py)?;
        // this methods operates by setting new values in the cache,
        // hence we will compare results by letting the C implementation
        // operate over a deepcopy of the cache, and finally compare both
        // caches.
        let c_cache = PyDict::new(py);
        for (k, v) in cache.items(py) {
            c_cache.set_item(py, k, PySet::new(py, v)?)?;
        }

        let start_rev = UncheckedRevision(args.get_item(py, 1).extract(py)?);
        let end_rev = UncheckedRevision(args.get_item(py, 2).extract(py)?);
        let mut cache_wrapper = PySnapshotsCache{ py, dict: cache };
        index.find_snapshots(
            start_rev,
            end_rev,
            &mut cache_wrapper,
        ).map_err(|_| revlog_error(py))?;
        Ok(py.None())
    }

    /// determine revisions with deltas to reconstruct fulltext
    def deltachain(&self, *args, **_kw) -> PyResult<PyObject> {
        let index = self.index(py).borrow();
        let rev = args.get_item(py, 0).extract::<BaseRevision>(py)?.into();
        let stop_rev =
            args.get_item(py, 1).extract::<Option<BaseRevision>>(py)?;
        let rev = index.check_revision(rev).ok_or_else(|| {
            nodemap_error(py, NodeMapError::RevisionNotInIndex(rev))
        })?;
        let stop_rev = if let Some(stop_rev) = stop_rev {
            let stop_rev = UncheckedRevision(stop_rev);
            Some(index.check_revision(stop_rev).ok_or_else(|| {
                nodemap_error(py, NodeMapError::RevisionNotInIndex(stop_rev))
            })?)
        } else {None};
        let using_general_delta = args.get_item(py, 2)
            .extract::<Option<u32>>(py)?
            .map(|i| i != 0);
        let (chain, stopped) = index.delta_chain(
            rev, stop_rev, using_general_delta
        ).map_err(|e| {
            PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
        })?;

        let chain: Vec<_> = chain.into_iter().map(|r| r.0).collect();
        Ok(
            PyTuple::new(
                py,
                &[
                    chain.into_py_object(py).into_object(),
                    stopped.into_py_object(py).into_object()
                ]
            ).into_object()
        )

    }

    /// slice planned chunk read to reach a density threshold
    def slicechunktodensity(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_slicechunktodensity(
            py,
            args.get_item(py, 0),
            args.get_item(py, 1).extract(py)?,
            args.get_item(py, 2).extract(py)?
        )?;
        Ok(rust_res)
    }

    // index_sequence_methods and index_mapping_methods.
    //
    // Since we call back through the high level Python API,
    // there's no point making a distinction between index_get
    // and index_getitem.
    // gracinet 2023: this above is no longer true for the pure Rust impl

    /// Number of revisions in the index.
    def __len__(&self) -> PyResult<usize> {
        self.len(py)
    }

    /// Subscript access, by revision number or by node id.
    def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
        let rust_res = self.inner_getitem(py, key.clone_ref(py))?;
        Ok(rust_res)
    }

    def __contains__(&self, item: PyObject) -> PyResult<bool> {
        // ObjectProtocol does not seem to provide contains(), so
        // this is an equivalent implementation of the index_contains()
        // defined in revlog.c
        match item.extract::<i32>(py) {
            Ok(rev) => {
                Ok(rev >= -1 && rev < self.len(py)? as BaseRevision)
            }
            Err(_) => {
                let item_bytes: PyBytes = item.extract(py)?;
                let rust_res = self.has_node(py, item_bytes)?;
                Ok(rust_res)
            }
        }
    }

    /// Full nodemap bytes, suitable for writing to disk as-is.
    def nodemap_data_all(&self) -> PyResult<PyBytes> {
        self.inner_nodemap_data_all(py)
    }

    /// Incremental nodemap data (docket, changed size, bytes) or None.
    def nodemap_data_incremental(&self) -> PyResult<PyObject> {
        self.inner_nodemap_data_incremental(py)
    }
    /// Load persistent nodemap data and remember its docket.
    def update_nodemap_data(
        &self,
        docket: PyObject,
        nm_data: PyObject
    ) -> PyResult<PyObject> {
        self.inner_update_nodemap_data(py, docket, nm_data)
    }

    @property
    def entry_size(&self) -> PyResult<PyInt> {
        let rust_res: PyInt = INDEX_ENTRY_SIZE.to_py_object(py);
        Ok(rust_res)
    }

    @property
    def rust_ext_compat(&self) -> PyResult<PyInt> {
        // will be entirely removed when the Rust index yet useful to
        // implement in Rust to detangle things when removing `self.cindex`
        let rust_res: PyInt = 1.to_py_object(py);
        Ok(rust_res)
    }

    @property
    def is_rust(&self) -> PyResult<PyBool> {
        // NOTE(review): returns `false` despite the name — looks
        // intentional for compat purposes, but confirm against callers.
        Ok(false.to_py_object(py))
    }

});
483 483
/// Take a (potentially) mmap'ed buffer, and return the underlying Python
/// buffer along with the Rust slice into said buffer. We need to keep the
/// Python buffer around, otherwise we'd get a dangling pointer once the buffer
/// is freed from Python's side.
///
/// # Safety
///
/// The caller must make sure that the buffer is kept around for at least as
/// long as the slice.
#[deny(unsafe_op_in_unsafe_fn)]
unsafe fn mmap_keeparound(
    py: Python,
    data: PyObject,
) -> PyResult<(
    PyBuffer,
    Box<dyn std::ops::Deref<Target = [u8]> + Send + Sync + 'static>,
)> {
    let buf = PyBuffer::get(py, &data)?;
    let len = buf.item_count();

    // Build a slice from the mmap'ed buffer data
    let cbuf = buf.buf_ptr();
    let bytes = if std::mem::size_of::<u8>() == buf.item_size()
        && buf.is_c_contiguous()
        && u8::is_compatible_format(buf.format())
    {
        // SAFETY: we just checked the buffer is C-contiguous and made of
        // single bytes; the caller keeps `buf` alive as long as the slice.
        unsafe { std::slice::from_raw_parts(cbuf as *const u8, len) }
    } else {
        return Err(PyErr::new::<ValueError, _>(
            py,
            "Nodemap data buffer has an invalid memory representation"
                .to_string(),
        ));
    };

    Ok((buf, Box::new(bytes)))
}
521 521
522 522 fn py_tuple_to_revision_data_params(
523 523 py: Python,
524 524 tuple: PyTuple,
525 525 ) -> PyResult<RevisionDataParams> {
526 526 if tuple.len(py) < 8 {
527 527 // this is better than the panic promised by tup.get_item()
528 528 return Err(PyErr::new::<IndexError, _>(
529 529 py,
530 530 "tuple index out of range",
531 531 ));
532 532 }
533 533 let offset_or_flags: u64 = tuple.get_item(py, 0).extract(py)?;
534 534 let node_id = tuple
535 535 .get_item(py, 7)
536 536 .extract::<PyBytes>(py)?
537 537 .data(py)
538 538 .try_into()
539 539 .unwrap();
540 540 let flags = (offset_or_flags & 0xFFFF) as u16;
541 541 let data_offset = offset_or_flags >> 16;
542 542 Ok(RevisionDataParams {
543 543 flags,
544 544 data_offset,
545 545 data_compressed_length: tuple.get_item(py, 1).extract(py)?,
546 546 data_uncompressed_length: tuple.get_item(py, 2).extract(py)?,
547 547 data_delta_base: tuple.get_item(py, 3).extract(py)?,
548 548 link_rev: tuple.get_item(py, 4).extract(py)?,
549 549 parent_rev_1: tuple.get_item(py, 5).extract(py)?,
550 550 parent_rev_2: tuple.get_item(py, 6).extract(py)?,
551 551 node_id,
552 552 ..Default::default()
553 553 })
554 554 }
555 555 fn revision_data_params_to_py_tuple(
556 556 py: Python,
557 557 params: RevisionDataParams,
558 558 ) -> PyTuple {
559 559 PyTuple::new(
560 560 py,
561 561 &[
562 562 params.data_offset.into_py_object(py).into_object(),
563 563 params
564 564 .data_compressed_length
565 565 .into_py_object(py)
566 566 .into_object(),
567 567 params
568 568 .data_uncompressed_length
569 569 .into_py_object(py)
570 570 .into_object(),
571 571 params.data_delta_base.into_py_object(py).into_object(),
572 572 params.link_rev.into_py_object(py).into_object(),
573 573 params.parent_rev_1.into_py_object(py).into_object(),
574 574 params.parent_rev_2.into_py_object(py).into_object(),
575 575 PyBytes::new(py, &params.node_id)
576 576 .into_py_object(py)
577 577 .into_object(),
578 578 params._sidedata_offset.into_py_object(py).into_object(),
579 579 params
580 580 ._sidedata_compressed_length
581 581 .into_py_object(py)
582 582 .into_object(),
583 583 params
584 584 .data_compression_mode
585 585 .into_py_object(py)
586 586 .into_object(),
587 587 params
588 588 ._sidedata_compression_mode
589 589 .into_py_object(py)
590 590 .into_object(),
591 591 params._rank.into_py_object(py).into_object(),
592 592 ],
593 593 )
594 594 }
595 595
/// Adapter exposing a Python dict as a snapshots cache for hg-core.
struct PySnapshotsCache<'p> {
    py: Python<'p>,
    // Maps a revision to a Python set of the snapshot revisions based on it
    // (see `insert_for` below).
    dict: PyDict,
}
600 600
601 601 impl<'p> SnapshotsCache for PySnapshotsCache<'p> {
602 602 fn insert_for(
603 603 &mut self,
604 604 rev: BaseRevision,
605 605 value: BaseRevision,
606 606 ) -> Result<(), RevlogError> {
607 607 let pyvalue = value.into_py_object(self.py).into_object();
608 608 match self.dict.get_item(self.py, rev) {
609 609 Some(obj) => obj
610 610 .extract::<PySet>(self.py)
611 611 .and_then(|set| set.add(self.py, pyvalue)),
612 612 None => PySet::new(self.py, vec![pyvalue])
613 613 .and_then(|set| self.dict.set_item(self.py, rev, set)),
614 614 }
615 615 .map_err(|_| {
616 616 RevlogError::Other(HgError::unsupported(
617 617 "Error in Python caches handling",
618 618 ))
619 619 })
620 620 }
621 621 }
622 622
623 623 impl Index {
    /// Build the py_class instance from a raw index buffer and header.
    fn new(py: Python, data: PyObject, header: u32) -> PyResult<Self> {
        // Safety: we keep the buffer around inside the class as `index_mmap`
        let (buf, bytes) = unsafe { mmap_keeparound(py, data)? };

        Self::create_instance(
            py,
            hg::index::Index::new(
                bytes,
                IndexHeader::parse(&header.to_be_bytes())
                    .expect("default header is broken")
                    .unwrap(),
            )
            .map_err(|e| {
                revlog_error_with_msg(py, e.to_string().as_bytes())
            })?,
            RefCell::new(None), // nt
            RefCell::new(None), // docket
            RefCell::new(None), // nodemap_mmap
            RefCell::new(Some(buf)), // index_mmap
            RefCell::new(None), // head_revs_py_list
            RefCell::new(None), // head_node_ids_py_list
        )
    }
647 647
648 648 fn len(&self, py: Python) -> PyResult<usize> {
649 649 let rust_index_len = self.index(py).borrow().len();
650 650 Ok(rust_index_len)
651 651 }
652 652
    /// This is scaffolding at this point, but it could also become
    /// a way to start a persistent nodemap or perform a
    /// vacuum / repack operation
    ///
    /// Inserts every revision of the index into the given nodetree.
    fn fill_nodemap(
        &self,
        py: Python,
        nt: &mut CoreNodeTree,
    ) -> PyResult<PyObject> {
        let index = self.index(py).borrow();
        for r in 0..self.len(py)? {
            let rev = Revision(r as BaseRevision);
            // in this case node() won't ever return None
            nt.insert(&*index, index.node(rev).unwrap(), rev)
                .map_err(|e| nodemap_error(py, e))?
        }
        Ok(py.None())
    }
670 670
    /// Return the cell holding the nodetree, building and filling the
    /// nodetree first if it has not been created yet (or was cleared).
    fn get_nodetree<'a>(
        &'a self,
        py: Python<'a>,
    ) -> PyResult<&'a RefCell<Option<CoreNodeTree>>> {
        if self.nt(py).borrow().is_none() {
            // Start from an empty read-only section, then insert all revs.
            let readonly = Box::<Vec<_>>::default();
            let mut nt = CoreNodeTree::load_bytes(readonly, 0);
            self.fill_nodemap(py, &mut nt)?;
            self.nt(py).borrow_mut().replace(nt);
        }
        Ok(self.nt(py))
    }
683 683
    /// Returns the full nodemap bytes to be written as-is to disk
    fn inner_nodemap_data_all(&self, py: Python) -> PyResult<PyBytes> {
        // `take()` consumes the cached nodetree; `get_nodetree` will
        // lazily rebuild it on the next call.
        let nodemap = self.get_nodetree(py)?.borrow_mut().take().unwrap();
        let (readonly, bytes) = nodemap.into_readonly_and_added_bytes();

        // If there's anything readonly, we need to build the data again from
        // scratch
        let bytes = if readonly.len() > 0 {
            let mut nt = CoreNodeTree::load_bytes(Box::<Vec<_>>::default(), 0);
            self.fill_nodemap(py, &mut nt)?;

            let (readonly, bytes) = nt.into_readonly_and_added_bytes();
            assert_eq!(readonly.len(), 0);

            bytes
        } else {
            bytes
        };

        let bytes = PyBytes::new(py, &bytes);
        Ok(bytes)
    }
706 706
    /// Returns the last saved docket along with the size of any changed data
    /// (in number of blocks), and said data as bytes.
    ///
    /// Returns `None` when no docket has been registered yet.
    fn inner_nodemap_data_incremental(
        &self,
        py: Python,
    ) -> PyResult<PyObject> {
        let docket = self.docket(py).borrow();
        let docket = match docket.as_ref() {
            Some(d) => d,
            None => return Ok(py.None()),
        };

        // `take()` consumes the cached nodetree; it will be lazily rebuilt.
        let node_tree = self.get_nodetree(py)?.borrow_mut().take().unwrap();
        let masked_blocks = node_tree.masked_readonly_blocks();
        let (_, data) = node_tree.into_readonly_and_added_bytes();
        let changed = masked_blocks * std::mem::size_of::<Block>();

        Ok((docket, changed, PyBytes::new(py, &data))
            .to_py_object(py)
            .into_object())
    }
728 728
    /// Update the nodemap from the new (mmaped) data.
    /// The docket is kept as a reference for later incremental calls.
    fn inner_update_nodemap_data(
        &self,
        py: Python,
        docket: PyObject,
        nm_data: PyObject,
    ) -> PyResult<PyObject> {
        // Safety: we keep the buffer around inside the class as `nodemap_mmap`
        let (buf, bytes) = unsafe { mmap_keeparound(py, nm_data)? };
        let len = buf.item_count();
        self.nodemap_mmap(py).borrow_mut().replace(buf);

        let mut nt = CoreNodeTree::load_bytes(bytes, len);

        let data_tip = docket
            .getattr(py, "tip_rev")?
            .extract::<BaseRevision>(py)?
            .into();
        self.docket(py).borrow_mut().replace(docket.clone_ref(py));
        let idx = self.index(py).borrow();
        let data_tip = idx.check_revision(data_tip).ok_or_else(|| {
            nodemap_error(py, NodeMapError::RevisionNotInIndex(data_tip))
        })?;
        let current_tip = idx.len();

        // Insert the revisions that appeared after the persisted data was
        // written, i.e. those past the docket's tip.
        for r in (data_tip.0 + 1)..current_tip as BaseRevision {
            let rev = Revision(r);
            // in this case node() won't ever return None
            nt.insert(&*idx, idx.node(rev).unwrap(), rev)
                .map_err(|e| nodemap_error(py, e))?
        }

        *self.nt(py).borrow_mut() = Some(nt);

        Ok(py.None())
    }
766 766
    /// `__getitem__` implementation: `key` may be a revision number
    /// (returning the entry tuple) or a binary node id (returning the
    /// revision number, or `None` if not found).
    fn inner_getitem(&self, py: Python, key: PyObject) -> PyResult<PyObject> {
        let idx = self.index(py).borrow();
        Ok(match key.extract::<BaseRevision>(py) {
            Ok(key_as_int) => {
                let entry_params = if key_as_int == NULL_REVISION.0 {
                    // The null revision has no stored entry: synthesize one.
                    RevisionDataParams::default()
                } else {
                    let rev = UncheckedRevision(key_as_int);
                    match idx.entry_as_params(rev) {
                        Some(e) => e,
                        None => {
                            return Err(PyErr::new::<IndexError, _>(
                                py,
                                "revlog index out of range",
                            ));
                        }
                    }
                };
                revision_data_params_to_py_tuple(py, entry_params)
                    .into_object()
            }
            // Not an integer: treat the key as a binary node id.
            _ => self.get_rev(py, key.extract::<PyBytes>(py)?)?.map_or_else(
                || py.None(),
                |py_rev| py_rev.into_py_object(py).into_object(),
            ),
        })
    }
794 794
    /// Return the list of head node ids, refreshing both head caches.
    fn inner_head_node_ids(&self, py: Python) -> PyResult<PyObject> {
        let index = &*self.index(py).borrow();

        // We don't use the shortcut here, as it's actually slower to loop
        // through the cached `PyList` than to re-do the whole computation for
        // large lists, which are the performance sensitive ones anyway.
        let head_revs = index.head_revs().map_err(|e| graph_error(py, e))?;
        let res: Vec<_> = head_revs
            .iter()
            .map(|r| {
                PyBytes::new(
                    py,
                    index
                        .node(*r)
                        .expect("rev should have been in the index")
                        .as_bytes(),
                )
                .into_object()
            })
            .collect();

        self.cache_new_heads_py_list(&head_revs, py);
        self.cache_new_heads_node_ids_py_list(&head_revs, py);

        Ok(PyList::new(py, &res).into_object())
    }
821 821
    /// Return the cached `PyList` of head revisions, refreshing the cache
    /// first when the core index reports new heads.
    fn inner_headrevs(&self, py: Python) -> PyResult<PyObject> {
        let index = &*self.index(py).borrow();
        if let Some(new_heads) =
            index.head_revs_shortcut().map_err(|e| graph_error(py, e))?
        {
            self.cache_new_heads_py_list(&new_heads, py);
        }

        Ok(self
            .head_revs_py_list(py)
            .borrow()
            .as_ref()
            .expect("head revs should be cached")
            .clone_ref(py)
            .into_object())
    }
838 838
839 839 fn check_revision(
840 840 index: &hg::index::Index,
841 841 rev: UncheckedRevision,
842 842 py: Python,
843 843 ) -> PyResult<Revision> {
844 844 index
845 845 .check_revision(rev)
846 846 .ok_or_else(|| rev_not_in_index(py, rev))
847 847 }
848 848
    /// Compute the (removed, added) pair of head revision lists between
    /// the states identified by `begin` and `end`.
    fn inner_headrevsdiff(
        &self,
        py: Python,
        begin: &PyObject,
        end: &PyObject,
    ) -> PyResult<PyObject> {
        let begin = begin.extract::<BaseRevision>(py)?;
        let end = end.extract::<BaseRevision>(py)?;
        let index = &*self.index(py).borrow();
        // NOTE(review): both boundaries are shifted by one before
        // validation — presumably callers pass exclusive bounds/lengths;
        // confirm against the Python caller in revlog.py.
        let begin =
            Self::check_revision(index, UncheckedRevision(begin - 1), py)?;
        let end = Self::check_revision(index, UncheckedRevision(end - 1), py)?;
        let (removed, added) = index
            .head_revs_diff(begin, end)
            .map_err(|e| graph_error(py, e))?;
        let removed: Vec<_> =
            removed.into_iter().map(PyRevision::from).collect();
        let added: Vec<_> = added.into_iter().map(PyRevision::from).collect();
        let res = (removed, added).to_py_object(py).into_object();
        Ok(res)
    }
870 870
871 871 fn inner_headrevsfiltered(
872 872 &self,
873 873 py: Python,
874 874 filtered_revs: &PyObject,
875 875 ) -> PyResult<PyObject> {
876 let index = &mut *self.index(py).borrow_mut();
876 let index = &*self.index(py).borrow();
877 877 let filtered_revs = rev_pyiter_collect(py, filtered_revs, index)?;
878 878
879 879 if let Some(new_heads) = index
880 880 .head_revs_filtered(&filtered_revs, true)
881 881 .map_err(|e| graph_error(py, e))?
882 882 {
883 883 self.cache_new_heads_py_list(&new_heads, py);
884 884 }
885 885
886 886 Ok(self
887 887 .head_revs_py_list(py)
888 888 .borrow()
889 889 .as_ref()
890 890 .expect("head revs should be cached")
891 891 .clone_ref(py)
892 892 .into_object())
893 893 }
894 894
895 895 fn cache_new_heads_node_ids_py_list(
896 896 &self,
897 897 new_heads: &[Revision],
898 898 py: Python<'_>,
899 899 ) -> PyList {
900 900 let index = self.index(py).borrow();
901 901 let as_vec: Vec<PyObject> = new_heads
902 902 .iter()
903 903 .map(|r| {
904 904 PyBytes::new(
905 905 py,
906 906 index
907 907 .node(*r)
908 908 .expect("rev should have been in the index")
909 909 .as_bytes(),
910 910 )
911 911 .into_object()
912 912 })
913 913 .collect();
914 914 let new_heads_py_list = PyList::new(py, &as_vec);
915 915 *self.head_node_ids_py_list(py).borrow_mut() =
916 916 Some(new_heads_py_list.clone_ref(py));
917 917 new_heads_py_list
918 918 }
919 919
920 920 fn cache_new_heads_py_list(
921 921 &self,
922 922 new_heads: &[Revision],
923 923 py: Python<'_>,
924 924 ) -> PyList {
925 925 let as_vec: Vec<PyObject> = new_heads
926 926 .iter()
927 927 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
928 928 .collect();
929 929 let new_heads_py_list = PyList::new(py, &as_vec);
930 930 *self.head_revs_py_list(py).borrow_mut() =
931 931 Some(new_heads_py_list.clone_ref(py));
932 932 new_heads_py_list
933 933 }
934 934
935 935 fn inner_ancestors(
936 936 &self,
937 937 py: Python,
938 938 py_revs: &PyTuple,
939 939 ) -> PyResult<PyObject> {
940 940 let index = &*self.index(py).borrow();
941 941 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
942 942 let as_vec: Vec<_> = index
943 943 .ancestors(&revs)
944 944 .map_err(|e| graph_error(py, e))?
945 945 .iter()
946 946 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
947 947 .collect();
948 948 Ok(PyList::new(py, &as_vec).into_object())
949 949 }
950 950
951 951 fn inner_commonancestorsheads(
952 952 &self,
953 953 py: Python,
954 954 py_revs: &PyTuple,
955 955 ) -> PyResult<PyObject> {
956 956 let index = &*self.index(py).borrow();
957 957 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
958 958 let as_vec: Vec<_> = index
959 959 .common_ancestor_heads(&revs)
960 960 .map_err(|e| graph_error(py, e))?
961 961 .iter()
962 962 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
963 963 .collect();
964 964 Ok(PyList::new(py, &as_vec).into_object())
965 965 }
966 966
967 967 fn inner_computephasesmapsets(
968 968 &self,
969 969 py: Python,
970 970 py_roots: PyDict,
971 971 ) -> PyResult<PyObject> {
972 972 let index = &*self.index(py).borrow();
973 973 let roots: Result<HashMap<Phase, Vec<Revision>>, PyErr> = py_roots
974 974 .items_list(py)
975 975 .iter(py)
976 976 .map(|r| {
977 977 let phase = r.get_item(py, 0)?;
978 978 let revs: Vec<_> =
979 979 rev_pyiter_collect(py, &r.get_item(py, 1)?, index)?;
980 980 let phase = Phase::try_from(phase.extract::<usize>(py)?)
981 981 .map_err(|_| revlog_error(py));
982 982 Ok((phase?, revs))
983 983 })
984 984 .collect();
985 985 let (len, phase_maps) = index
986 986 .compute_phases_map_sets(roots?)
987 987 .map_err(|e| graph_error(py, e))?;
988 988
989 989 // Ugly hack, but temporary
990 990 const IDX_TO_PHASE_NUM: [usize; 4] = [1, 2, 32, 96];
991 991 let py_phase_maps = PyDict::new(py);
992 992 for (idx, roots) in phase_maps.into_iter().enumerate() {
993 993 let phase_num = IDX_TO_PHASE_NUM[idx].into_py_object(py);
994 994 // This is a bit faster than collecting into a `Vec` and passing
995 995 // it to `PySet::new`.
996 996 let set = PySet::empty(py)?;
997 997 for rev in roots {
998 998 set.add(py, PyRevision::from(rev).into_py_object(py))?;
999 999 }
1000 1000 py_phase_maps.set_item(py, phase_num, set)?;
1001 1001 }
1002 1002 Ok(PyTuple::new(
1003 1003 py,
1004 1004 &[
1005 1005 len.into_py_object(py).into_object(),
1006 1006 py_phase_maps.into_object(),
1007 1007 ],
1008 1008 )
1009 1009 .into_object())
1010 1010 }
1011 1011
1012 1012 fn inner_slicechunktodensity(
1013 1013 &self,
1014 1014 py: Python,
1015 1015 revs: PyObject,
1016 1016 target_density: f64,
1017 1017 min_gap_size: usize,
1018 1018 ) -> PyResult<PyObject> {
1019 1019 let index = &*self.index(py).borrow();
1020 1020 let revs: Vec<_> = rev_pyiter_collect(py, &revs, index)?;
1021 1021 let as_nested_vec =
1022 1022 index.slice_chunk_to_density(&revs, target_density, min_gap_size);
1023 1023 let mut res = Vec::with_capacity(as_nested_vec.len());
1024 1024 let mut py_chunk = Vec::new();
1025 1025 for chunk in as_nested_vec {
1026 1026 py_chunk.clear();
1027 1027 py_chunk.reserve_exact(chunk.len());
1028 1028 for rev in chunk {
1029 1029 py_chunk.push(
1030 1030 PyRevision::from(rev).into_py_object(py).into_object(),
1031 1031 );
1032 1032 }
1033 1033 res.push(PyList::new(py, &py_chunk).into_object());
1034 1034 }
1035 1035 // This is just to do the same as C, not sure why it does this
1036 1036 if res.len() == 1 {
1037 1037 Ok(PyTuple::new(py, &res).into_object())
1038 1038 } else {
1039 1039 Ok(PyList::new(py, &res).into_object())
1040 1040 }
1041 1041 }
1042 1042
1043 1043 fn inner_reachableroots2(
1044 1044 &self,
1045 1045 py: Python,
1046 1046 min_root: UncheckedRevision,
1047 1047 heads: PyObject,
1048 1048 roots: PyObject,
1049 1049 include_path: bool,
1050 1050 ) -> PyResult<PyObject> {
1051 1051 let index = &*self.index(py).borrow();
1052 1052 let heads = rev_pyiter_collect_or_else(py, &heads, index, |_rev| {
1053 1053 PyErr::new::<IndexError, _>(py, "head out of range")
1054 1054 })?;
1055 1055 let roots: Result<_, _> = roots
1056 1056 .iter(py)?
1057 1057 .map(|r| {
1058 1058 r.and_then(|o| match o.extract::<PyRevision>(py) {
1059 1059 Ok(r) => Ok(UncheckedRevision(r.0)),
1060 1060 Err(e) => Err(e),
1061 1061 })
1062 1062 })
1063 1063 .collect();
1064 1064 let as_set = index
1065 1065 .reachable_roots(min_root, heads, roots?, include_path)
1066 1066 .map_err(|e| graph_error(py, e))?;
1067 1067 let as_vec: Vec<PyObject> = as_set
1068 1068 .iter()
1069 1069 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
1070 1070 .collect();
1071 1071 Ok(PyList::new(py, &as_vec).into_object())
1072 1072 }
1073 1073 }
1074 1074
py_class!(pub class NodeTree |py| {
    // The core nodemap proper: maps node (hash) prefixes to revisions.
    data nt: RefCell<CoreNodeTree>;
    // Leaked immutable reference to the Rust index backing this tree.
    data index: RefCell<UnsafePyLeaked<PySharedIndex>>;

    def __new__(_cls, index: PyObject) -> PyResult<NodeTree> {
        let index = py_rust_index_to_graph(py, index)?;
        let nt = CoreNodeTree::default(); // in-RAM, fully mutable
        Self::create_instance(py, RefCell::new(nt), RefCell::new(index))
    }

    /// Tell whether the NodeTree is still valid
    ///
    /// In case of mutation of the index, the given results are not
    /// guaranteed to be correct, and in fact, the methods borrowing
    /// the inner index would fail because of `PySharedRef` poisoning
    /// (generation-based guard), same as iterating on a `dict` that has
    /// been meanwhile mutated.
    def is_invalidated(&self) -> PyResult<bool> {
        let leaked = self.index(py).borrow();
        // Safety: we don't leak the "faked" reference out of `UnsafePyLeaked`
        let result = unsafe { leaked.try_borrow(py) };
        // two cases for result to be an error:
        // - the index has previously been mutably borrowed
        // - there is currently a mutable borrow
        // in both cases this amounts for previous results related to
        // the index to still be valid.
        Ok(result.is_err())
    }

    /// Insert the node of the given revision into the tree.
    ///
    /// Errors with a `ValueError` if `rev` is not in the index; the null
    /// revision is rejected explicitly since it has no node to insert.
    def insert(&self, rev: PyRevision) -> PyResult<PyObject> {
        let leaked = self.index(py).borrow();
        // Safety: we don't leak the "faked" reference out of `UnsafePyLeaked`
        let index = &*unsafe { leaked.try_borrow(py)? };

        let rev = UncheckedRevision(rev.0);
        let rev = index
            .check_revision(rev)
            .ok_or_else(|| rev_not_in_index(py, rev))?;
        if rev == NULL_REVISION {
            return Err(rev_not_in_index(py, rev.into()))
        }

        // `unwrap()` is safe: `check_revision` validated `rev` above.
        let entry = index.inner.get_entry(rev).unwrap();
        let mut nt = self.nt(py).borrow_mut();
        nt.insert(index, entry.hash(), rev).map_err(|e| nodemap_error(py, e))?;

        Ok(py.None())
    }

    /// Lookup by node hex prefix in the NodeTree, returning revision number.
    ///
    /// This is not part of the classical NodeTree API, but is good enough
    /// for unit testing, as in `test-rust-revlog.py`.
    def prefix_rev_lookup(
        &self,
        node_prefix: PyBytes
    ) -> PyResult<Option<PyRevision>> {
        let prefix = NodePrefix::from_hex(node_prefix.data(py))
            .map_err(|_| PyErr::new::<ValueError, _>(
                py,
                format!("Invalid node or prefix {:?}",
                        node_prefix.as_object()))
            )?;

        let nt = self.nt(py).borrow();
        let leaked = self.index(py).borrow();
        // Safety: we don't leak the "faked" reference out of `UnsafePyLeaked`
        let index = &*unsafe { leaked.try_borrow(py)? };

        Ok(nt.find_bin(index, prefix)
            .map_err(|e| nodemap_error(py, e))?
            .map(|r| r.into())
        )
    }

    /// Length of the shortest unambiguous hex prefix of `node`.
    ///
    /// Raises `RevlogError` when the node is not found at all.
    def shortest(&self, node: PyBytes) -> PyResult<usize> {
        let nt = self.nt(py).borrow();
        let leaked = self.index(py).borrow();
        // Safety: we don't leak the "faked" reference out of `UnsafePyLeaked`
        let idx = &*unsafe { leaked.try_borrow(py)? };
        match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
        {
            Ok(Some(l)) => Ok(l),
            Ok(None) => Err(revlog_error(py)),
            Err(e) => Err(nodemap_error(py, e)),
        }
    }
});
1163 1163
1164 1164 fn revlog_error(py: Python) -> PyErr {
1165 1165 match py
1166 1166 .import("mercurial.error")
1167 1167 .and_then(|m| m.get(py, "RevlogError"))
1168 1168 {
1169 1169 Err(e) => e,
1170 1170 Ok(cls) => PyErr::from_instance(
1171 1171 py,
1172 1172 cls.call(py, (py.None(),), None).ok().into_py_object(py),
1173 1173 ),
1174 1174 }
1175 1175 }
1176 1176
1177 1177 fn revlog_error_with_msg(py: Python, msg: &[u8]) -> PyErr {
1178 1178 match py
1179 1179 .import("mercurial.error")
1180 1180 .and_then(|m| m.get(py, "RevlogError"))
1181 1181 {
1182 1182 Err(e) => e,
1183 1183 Ok(cls) => PyErr::from_instance(
1184 1184 py,
1185 1185 cls.call(py, (PyBytes::new(py, msg),), None)
1186 1186 .ok()
1187 1187 .into_py_object(py),
1188 1188 ),
1189 1189 }
1190 1190 }
1191 1191
1192 1192 fn graph_error(py: Python, _err: hg::GraphError) -> PyErr {
1193 1193 // ParentOutOfRange is currently the only alternative
1194 1194 // in `hg::GraphError`. The C index always raises this simple ValueError.
1195 1195 PyErr::new::<ValueError, _>(py, "parent out of range")
1196 1196 }
1197 1197
1198 1198 fn nodemap_rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1199 1199 PyErr::new::<ValueError, _>(
1200 1200 py,
1201 1201 format!(
1202 1202 "Inconsistency: Revision {} found in nodemap \
1203 1203 is not in revlog index",
1204 1204 rev
1205 1205 ),
1206 1206 )
1207 1207 }
1208 1208
1209 1209 fn rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1210 1210 PyErr::new::<ValueError, _>(
1211 1211 py,
1212 1212 format!("revlog index out of range: {}", rev),
1213 1213 )
1214 1214 }
1215 1215
1216 1216 /// Standard treatment of NodeMapError
1217 1217 fn nodemap_error(py: Python, err: NodeMapError) -> PyErr {
1218 1218 match err {
1219 1219 NodeMapError::MultipleResults => revlog_error(py),
1220 1220 NodeMapError::RevisionNotInIndex(r) => nodemap_rev_not_in_index(py, r),
1221 1221 }
1222 1222 }
1223 1223
1224 1224 /// Create the module, with __package__ given from parent
1225 1225 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
1226 1226 let dotted_name = &format!("{}.revlog", package);
1227 1227 let m = PyModule::new(py, dotted_name)?;
1228 1228 m.add(py, "__package__", package)?;
1229 1229 m.add(py, "__doc__", "RevLog - Rust implementations")?;
1230 1230
1231 1231 m.add_class::<Index>(py)?;
1232 1232 m.add_class::<NodeTree>(py)?;
1233 1233
1234 1234 let sys = PyModule::import(py, "sys")?;
1235 1235 let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
1236 1236 sys_modules.set_item(py, dotted_name, &m)?;
1237 1237
1238 1238 Ok(m)
1239 1239 }
General Comments 0
You need to be logged in to leave comments. Login now