##// END OF EJS Templates
rust-cpython: also accept the `filteredrevs` argument in index.headrevs...
marmoute -
r52859:fb4d49c5 default
parent child Browse files
Show More
@@ -1,1239 +1,1248
1 1 // revlog.rs
2 2 //
3 3 // Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
4 4 //
5 5 // This software may be used and distributed according to the terms of the
6 6 // GNU General Public License version 2 or any later version.
7 7
8 8 use crate::{
9 9 conversion::{rev_pyiter_collect, rev_pyiter_collect_or_else},
10 10 utils::{node_from_py_bytes, node_from_py_object},
11 11 PyRevision,
12 12 };
13 13 use cpython::{
14 14 buffer::{Element, PyBuffer},
15 15 exc::{IndexError, ValueError},
16 16 ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyInt, PyList,
17 17 PyModule, PyObject, PyResult, PySet, PyString, PyTuple, Python,
18 18 PythonObject, ToPyObject, UnsafePyLeaked,
19 19 };
20 20 use hg::{
21 21 errors::HgError,
22 22 index::{
23 23 IndexHeader, Phase, RevisionDataParams, SnapshotsCache,
24 24 INDEX_ENTRY_SIZE,
25 25 },
26 26 nodemap::{Block, NodeMapError, NodeTree as CoreNodeTree},
27 27 revlog::{nodemap::NodeMap, Graph, NodePrefix, RevlogError, RevlogIndex},
28 28 BaseRevision, Node, Revision, UncheckedRevision, NULL_REVISION,
29 29 };
30 30 use std::{cell::RefCell, collections::HashMap};
31 31 use vcsgraph::graph::Graph as VCSGraph;
32 32
/// A shared reference to the hg-core index, suitable for implementing the
/// graph traits consumed by both `hg` and `vcsgraph`.
///
/// NOTE(review): the `'static` lifetime is not literally true — this
/// reference is produced through `UnsafePyLeaked::map` in
/// `py_rust_index_to_graph`, which keeps the owning Python object alive.
/// The "faked" reference must never escape its `UnsafePyLeaked` wrapper.
pub struct PySharedIndex {
    /// The underlying hg-core index
    pub(crate) inner: &'static hg::index::Index,
}
37 37
/// Return a Struct implementing the Graph trait
///
/// Extracts the Rust-backed `Index` from the given Python object and leaks
/// an immutable reference to its inner hg-core index, wrapped so that the
/// reference cannot outlive the Python object that owns the data.
///
/// Errors with a Python `TypeError`-style failure if `index` is not an
/// instance of the `Index` class defined in this module.
pub(crate) fn py_rust_index_to_graph(
    py: Python,
    index: PyObject,
) -> PyResult<UnsafePyLeaked<PySharedIndex>> {
    let midx = index.extract::<Index>(py)?;
    let leaked = midx.index(py).leak_immutable();
    // Safety: we don't leak the "faked" reference out of the `UnsafePyLeaked`
    Ok(unsafe { leaked.map(py, |idx| PySharedIndex { inner: idx }) })
}
48 48
49 49 impl Clone for PySharedIndex {
50 50 fn clone(&self) -> Self {
51 51 Self { inner: self.inner }
52 52 }
53 53 }
54 54
impl Graph for PySharedIndex {
    /// Parents of `rev`: pure delegation to the underlying hg-core index.
    #[inline(always)]
    fn parents(&self, rev: Revision) -> Result<[Revision; 2], hg::GraphError> {
        self.inner.parents(rev)
    }
}
61 61
impl VCSGraph for PySharedIndex {
    /// Adapt the `hg` graph access to the `vcsgraph` trait, unwrapping the
    /// `Revision` newtypes on both the success and error paths.
    #[inline(always)]
    fn parents(
        &self,
        rev: BaseRevision,
    ) -> Result<vcsgraph::graph::Parents, vcsgraph::graph::GraphReadError>
    {
        // FIXME This trait should be reworked to decide between Revision
        // and UncheckedRevision, get better errors names, etc.
        match Graph::parents(self, Revision(rev)) {
            Ok(parents) => {
                Ok(vcsgraph::graph::Parents([parents[0].0, parents[1].0]))
            }
            Err(hg::GraphError::ParentOutOfRange(rev)) => {
                Err(vcsgraph::graph::GraphReadError::KeyedInvalidKey(rev.0))
            }
        }
    }
}
81 81
impl RevlogIndex for PySharedIndex {
    /// Total number of revisions in the index (delegated).
    fn len(&self) -> usize {
        self.inner.len()
    }
    /// Node id for `rev`, or `None` if out of bounds (delegated).
    fn node(&self, rev: Revision) -> Option<&Node> {
        self.inner.node(rev)
    }
}
90 90
// Python-visible `Index` class, backed by the pure-Rust index.
// `py_class!` is the rust-cpython macro: each `def` below becomes a Python
// method, and each `data` slot is per-instance state.
py_class!(pub class Index |py| {
    @shared data index: hg::index::Index;
    // Lazily-built nodemap; populated on first use by `get_nodetree`
    data nt: RefCell<Option<CoreNodeTree>>;
    data docket: RefCell<Option<PyObject>>;
    // Holds a reference to the mmap'ed persistent nodemap data
    data nodemap_mmap: RefCell<Option<PyBuffer>>;
    // Holds a reference to the mmap'ed persistent index data
    data index_mmap: RefCell<Option<PyBuffer>>;
    // Cache of the last computed head revisions (see `inner_headrevs`)
    data head_revs_py_list: RefCell<Option<PyList>>;
    // Cache of the last computed head node ids
    data head_node_ids_py_list: RefCell<Option<PyList>>;

    def __new__(
        _cls,
        data: PyObject,
        default_header: u32,
    ) -> PyResult<Self> {
        Self::new(py, data, default_header)
    }

    /// Compatibility layer used for Python consumers needing access to the C index
    ///
    /// Only use case so far is `scmutil.shortesthexnodeidprefix`,
    /// that may need to build a custom `nodetree`, based on a specified revset.
    /// With a Rust implementation of the nodemap, we will be able to get rid of
    /// this, by exposing our own standalone nodemap class,
    /// ready to accept `Index`.
    /* def get_cindex(&self) -> PyResult<PyObject> {
        Ok(self.cindex(py).borrow().inner().clone_ref(py))
    }
    */
    // Index API involving nodemap, as defined in mercurial/pure/parsers.py

    /// Return Revision if found, raises a bare `error.RevlogError`
    /// in case of ambiguity, same as C version does
    def get_rev(&self, node: PyBytes) -> PyResult<Option<PyRevision>> {
        let opt = self.get_nodetree(py)?.borrow();
        let nt = opt.as_ref().unwrap();
        let ridx = &*self.index(py).borrow();
        let node = node_from_py_bytes(py, &node)?;
        let rust_rev =
            nt.find_bin(ridx, node.into()).map_err(|e| nodemap_error(py, e))?;
        Ok(rust_rev.map(Into::into))

    }

    /// same as `get_rev()` but raises a bare `error.RevlogError` if node
    /// is not found.
    ///
    /// No need to repeat `node` in the exception, `mercurial/revlog.py`
    /// will catch and rewrap with it
    def rev(&self, node: PyBytes) -> PyResult<PyRevision> {
        self.get_rev(py, node)?.ok_or_else(|| revlog_error(py))
    }

    /// return True if the node exist in the index
    def has_node(&self, node: PyBytes) -> PyResult<bool> {
        // TODO OPTIM we could avoid a needless conversion here,
        // to do when scaffolding for pure Rust switch is removed,
        // as `get_rev()` currently does the necessary assertions
        self.get_rev(py, node).map(|opt| opt.is_some())
    }

    /// find length of shortest hex nodeid of a binary ID
    def shortest(&self, node: PyBytes) -> PyResult<usize> {
        let opt = self.get_nodetree(py)?.borrow();
        let nt = opt.as_ref().unwrap();
        let idx = &*self.index(py).borrow();
        match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
        {
            Ok(Some(l)) => Ok(l),
            Ok(None) => Err(revlog_error(py)),
            Err(e) => Err(nodemap_error(py, e)),
        }
    }

    /// Resolve a (possibly abbreviated) hex nodeid prefix to a full binary
    /// node id, or `None` if no revision matches.
    def partialmatch(&self, node: PyObject) -> PyResult<Option<PyBytes>> {
        let opt = self.get_nodetree(py)?.borrow();
        let nt = opt.as_ref().unwrap();
        let idx = &*self.index(py).borrow();

        // Python 3 hands us a `str`; the fallback branch handles `bytes`.
        let node_as_string = if cfg!(feature = "python3-sys") {
            node.cast_as::<PyString>(py)?.to_string(py)?.to_string()
        }
        else {
            let node = node.extract::<PyBytes>(py)?;
            String::from_utf8_lossy(node.data(py)).to_string()
        };

        let prefix = NodePrefix::from_hex(&node_as_string)
            .map_err(|_| PyErr::new::<ValueError, _>(
                py, format!("Invalid node or prefix '{}'", node_as_string))
            )?;

        nt.find_bin(idx, prefix)
            // TODO make an inner API returning the node directly
            .map(|opt| opt.map(
                |rev| PyBytes::new(py, idx.node(rev).unwrap().as_bytes())))
            .map_err(|e| nodemap_error(py, e))

    }

    /// append an index entry
    def append(&self, tup: PyTuple) -> PyResult<PyObject> {
        if tup.len(py) < 8 {
            // this is better than the panic promised by tup.get_item()
            return Err(
                PyErr::new::<IndexError, _>(py, "tuple index out of range"))
        }
        let node_bytes = tup.get_item(py, 7).extract(py)?;
        let node = node_from_py_object(py, &node_bytes)?;

        let rev = self.len(py)? as BaseRevision;

        // This is ok since we will just add the revision to the index
        let rev = Revision(rev);
        self.index(py)
            .borrow_mut()
            .append(py_tuple_to_revision_data_params(py, tup)?)
            .unwrap();
        // keep the nodemap in sync with the new entry
        let idx = &*self.index(py).borrow();
        self.get_nodetree(py)?.borrow_mut().as_mut().unwrap()
            .insert(idx, &node, rev)
            .map_err(|e| nodemap_error(py, e))?;
        Ok(py.None())
    }

    def __delitem__(&self, key: PyObject) -> PyResult<()> {
        // __delitem__ is both for `del idx[r]` and `del idx[r1:r2]`
        let start = if let Ok(rev) = key.extract(py) {
            UncheckedRevision(rev)
        } else {
            let start = key.getattr(py, "start")?;
            UncheckedRevision(start.extract(py)?)
        };
        let start = self.index(py)
            .borrow()
            .check_revision(start)
            .ok_or_else(|| {
                nodemap_error(py, NodeMapError::RevisionNotInIndex(start))
            })?;
        self.index(py).borrow_mut().remove(start).unwrap();
        // truncation invalidates the nodemap: rebuild it from scratch
        let mut opt = self.get_nodetree(py)?.borrow_mut();
        let nt = opt.as_mut().unwrap();
        nt.invalidate_all();
        self.fill_nodemap(py, nt)?;
        Ok(())
    }

    //
    // Index methods previously reforwarded to C index (tp_methods)
    // Same ordering as in revlog.c
    //

    /// return the gca set of the given revs
    def ancestors(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_ancestors(py, args)?;
        Ok(rust_res)
    }

    /// return the heads of the common ancestors of the given revs
    def commonancestorsheads(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_commonancestorsheads(py, args)?;
        Ok(rust_res)
    }

    /// Clear the index caches and inner py_class data.
    /// It is Python's responsibility to call `update_nodemap_data` again.
    def clearcaches(&self) -> PyResult<PyObject> {
        self.nt(py).borrow_mut().take();
        self.docket(py).borrow_mut().take();
        self.nodemap_mmap(py).borrow_mut().take();
        self.head_revs_py_list(py).borrow_mut().take();
        self.head_node_ids_py_list(py).borrow_mut().take();
        self.index(py).borrow().clear_caches();
        Ok(py.None())
    }

    /// return the raw binary string representing a revision
    def entry_binary(&self, *args, **_kw) -> PyResult<PyObject> {
        let rindex = self.index(py).borrow();
        let rev = UncheckedRevision(args.get_item(py, 0).extract(py)?);
        let rust_bytes = rindex.check_revision(rev).and_then(
            |r| rindex.entry_binary(r))
            .ok_or_else(|| rev_not_in_index(py, rev))?;
        let rust_res = PyBytes::new(py, rust_bytes).into_object();
        Ok(rust_res)
    }

    /// return a binary packed version of the header
    def pack_header(&self, *args, **_kw) -> PyResult<PyObject> {
        let rindex = self.index(py).borrow();
        let packed = rindex.pack_header(args.get_item(py, 0).extract(py)?);
        let rust_res = PyBytes::new(py, &packed).into_object();
        Ok(rust_res)
    }

    /// compute phases
    def computephasesmapsets(&self, *args, **_kw) -> PyResult<PyObject> {
        let py_roots = args.get_item(py, 0).extract::<PyDict>(py)?;
        let rust_res = self.inner_computephasesmapsets(py, py_roots)?;
        Ok(rust_res)
    }

    /// reachableroots
    def reachableroots2(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_reachableroots2(
            py,
            UncheckedRevision(args.get_item(py, 0).extract(py)?),
            args.get_item(py, 1),
            args.get_item(py, 2),
            args.get_item(py, 3).extract(py)?,
        )?;
        Ok(rust_res)
    }

    /// get head revisions
    def headrevs(&self, *args, **_kw) -> PyResult<PyObject> {
        // Accept an optional positional `filteredrevs` argument, matching
        // the C implementation's signature.
        let filtered_revs = match &args.len(py) {
            0 => Ok(py.None()),
            1 => Ok(args.get_item(py, 0)),
            _ => Err(PyErr::new::<cpython::exc::TypeError, _>(py, "too many arguments")),
        }?;
        // `None` means "no filtering": use the plain (cached) computation.
        let rust_res = if filtered_revs.is_none(py) {
            self.inner_headrevs(py)
        } else {
            self.inner_headrevsfiltered(py, &filtered_revs)
        }?;
        Ok(rust_res)
    }

    /// get head nodeids
    def head_node_ids(&self) -> PyResult<PyObject> {
        let rust_res = self.inner_head_node_ids(py)?;
        Ok(rust_res)
    }

    /// get diff in head revisions
    def headrevsdiff(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_headrevsdiff(
          py,
          &args.get_item(py, 0),
          &args.get_item(py, 1))?;
        Ok(rust_res)
    }

    /// get filtered head revisions
    def headrevsfiltered(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_headrevsfiltered(py, &args.get_item(py, 0))?;
        Ok(rust_res)
    }

    /// True if the object is a snapshot
    def issnapshot(&self, *args, **_kw) -> PyResult<bool> {
        let index = self.index(py).borrow();
        let result = index
            .is_snapshot(UncheckedRevision(args.get_item(py, 0).extract(py)?))
            .map_err(|e| {
                PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
            })?;
        Ok(result)
    }

    /// Gather snapshot data in a cache dict
    def findsnapshots(&self, *args, **_kw) -> PyResult<PyObject> {
        let index = self.index(py).borrow();
        let cache: PyDict = args.get_item(py, 0).extract(py)?;
        // this methods operates by setting new values in the cache,
        // hence we will compare results by letting the C implementation
        // operate over a deepcopy of the cache, and finally compare both
        // caches.
        let c_cache = PyDict::new(py);
        for (k, v) in cache.items(py) {
            c_cache.set_item(py, k, PySet::new(py, v)?)?;
        }

        let start_rev = UncheckedRevision(args.get_item(py, 1).extract(py)?);
        let end_rev = UncheckedRevision(args.get_item(py, 2).extract(py)?);
        let mut cache_wrapper = PySnapshotsCache{ py, dict: cache };
        index.find_snapshots(
            start_rev,
            end_rev,
            &mut cache_wrapper,
        ).map_err(|_| revlog_error(py))?;
        Ok(py.None())
    }

    /// determine revisions with deltas to reconstruct fulltext
    def deltachain(&self, *args, **_kw) -> PyResult<PyObject> {
        let index = self.index(py).borrow();
        let rev = args.get_item(py, 0).extract::<BaseRevision>(py)?.into();
        let stop_rev =
            args.get_item(py, 1).extract::<Option<BaseRevision>>(py)?;
        let rev = index.check_revision(rev).ok_or_else(|| {
            nodemap_error(py, NodeMapError::RevisionNotInIndex(rev))
        })?;
        let stop_rev = if let Some(stop_rev) = stop_rev {
            let stop_rev = UncheckedRevision(stop_rev);
            Some(index.check_revision(stop_rev).ok_or_else(|| {
                nodemap_error(py, NodeMapError::RevisionNotInIndex(stop_rev))
            })?)
        } else {None};
        // third argument is a tri-state: None means "use the default"
        let using_general_delta = args.get_item(py, 2)
            .extract::<Option<u32>>(py)?
            .map(|i| i != 0);
        let (chain, stopped) = index.delta_chain(
            rev, stop_rev, using_general_delta
        ).map_err(|e| {
            PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
        })?;

        let chain: Vec<_> = chain.into_iter().map(|r| r.0).collect();
        Ok(
            PyTuple::new(
                py,
                &[
                    chain.into_py_object(py).into_object(),
                    stopped.into_py_object(py).into_object()
                ]
            ).into_object()
        )

    }

    /// slice planned chunk read to reach a density threshold
    def slicechunktodensity(&self, *args, **_kw) -> PyResult<PyObject> {
        let rust_res = self.inner_slicechunktodensity(
            py,
            args.get_item(py, 0),
            args.get_item(py, 1).extract(py)?,
            args.get_item(py, 2).extract(py)?
        )?;
        Ok(rust_res)
    }

    // index_sequence_methods and index_mapping_methods.
    //
    // Since we call back through the high level Python API,
    // there's no point making a distinction between index_get
    // and index_getitem.
    // gracinet 2023: this above is no longer true for the pure Rust impl

    def __len__(&self) -> PyResult<usize> {
        self.len(py)
    }

    def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
        let rust_res = self.inner_getitem(py, key.clone_ref(py))?;
        Ok(rust_res)
    }

    def __contains__(&self, item: PyObject) -> PyResult<bool> {
        // ObjectProtocol does not seem to provide contains(), so
        // this is an equivalent implementation of the index_contains()
        // defined in revlog.c
        match item.extract::<i32>(py) {
            Ok(rev) => {
                Ok(rev >= -1 && rev < self.len(py)? as BaseRevision)
            }
            Err(_) => {
                let item_bytes: PyBytes = item.extract(py)?;
                let rust_res = self.has_node(py, item_bytes)?;
                Ok(rust_res)
            }
        }
    }

    def nodemap_data_all(&self) -> PyResult<PyBytes> {
        self.inner_nodemap_data_all(py)
    }

    def nodemap_data_incremental(&self) -> PyResult<PyObject> {
        self.inner_nodemap_data_incremental(py)
    }
    def update_nodemap_data(
        &self,
        docket: PyObject,
        nm_data: PyObject
    ) -> PyResult<PyObject> {
        self.inner_update_nodemap_data(py, docket, nm_data)
    }

    @property
    def entry_size(&self) -> PyResult<PyInt> {
        let rust_res: PyInt = INDEX_ENTRY_SIZE.to_py_object(py);
        Ok(rust_res)
    }

    @property
    def rust_ext_compat(&self) -> PyResult<PyInt> {
        // will be entirely removed when the Rust index yet useful to
        // implement in Rust to detangle things when removing `self.cindex`
        let rust_res: PyInt = 1.to_py_object(py);
        Ok(rust_res)
    }

    @property
    def is_rust(&self) -> PyResult<PyBool> {
        // NOTE(review): returns False although this *is* the Rust index —
        // presumably for compatibility with callers probing for the pure-Rust
        // revlog implementation; confirm against mercurial/revlog.py users.
        Ok(false.to_py_object(py))
    }

});
483 492
/// Take a (potentially) mmap'ed buffer, and return the underlying Python
/// buffer along with the Rust slice into said buffer. We need to keep the
/// Python buffer around, otherwise we'd get a dangling pointer once the buffer
/// is freed from Python's side.
///
/// # Safety
///
/// The caller must make sure that the buffer is kept around for at least as
/// long as the slice.
#[deny(unsafe_op_in_unsafe_fn)]
unsafe fn mmap_keeparound(
    py: Python,
    data: PyObject,
) -> PyResult<(
    PyBuffer,
    Box<dyn std::ops::Deref<Target = [u8]> + Send + Sync + 'static>,
)> {
    let buf = PyBuffer::get(py, &data)?;
    let len = buf.item_count();

    // Build a slice from the mmap'ed buffer data
    let cbuf = buf.buf_ptr();
    let bytes = if std::mem::size_of::<u8>() == buf.item_size()
        && buf.is_c_contiguous()
        && u8::is_compatible_format(buf.format())
    {
        // SAFETY: we just checked the buffer is C-contiguous, with byte-sized
        // items in a `u8`-compatible format, so `cbuf..cbuf+len` is a valid
        // `[u8]`; the returned `buf` keeps the memory alive (see fn Safety).
        unsafe { std::slice::from_raw_parts(cbuf as *const u8, len) }
    } else {
        return Err(PyErr::new::<ValueError, _>(
            py,
            "Nodemap data buffer has an invalid memory representation"
                .to_string(),
        ));
    };

    Ok((buf, Box::new(bytes)))
}
521 530
/// Build `RevisionDataParams` from the 8+-element tuple used by
/// `mercurial/pure/parsers.py` to describe an index entry.
///
/// Item 0 packs the data offset and the flags (flags in the low 16 bits);
/// item 7 is the binary node id. Raises `IndexError` if the tuple is too
/// short. Panics if item 7 has the wrong length (caller invariant).
fn py_tuple_to_revision_data_params(
    py: Python,
    tuple: PyTuple,
) -> PyResult<RevisionDataParams> {
    if tuple.len(py) < 8 {
        // this is better than the panic promised by tup.get_item()
        return Err(PyErr::new::<IndexError, _>(
            py,
            "tuple index out of range",
        ));
    }
    let offset_or_flags: u64 = tuple.get_item(py, 0).extract(py)?;
    let node_id = tuple
        .get_item(py, 7)
        .extract::<PyBytes>(py)?
        .data(py)
        .try_into()
        .unwrap();
    // unpack the combined field: low 16 bits are flags, the rest the offset
    let flags = (offset_or_flags & 0xFFFF) as u16;
    let data_offset = offset_or_flags >> 16;
    Ok(RevisionDataParams {
        flags,
        data_offset,
        data_compressed_length: tuple.get_item(py, 1).extract(py)?,
        data_uncompressed_length: tuple.get_item(py, 2).extract(py)?,
        data_delta_base: tuple.get_item(py, 3).extract(py)?,
        link_rev: tuple.get_item(py, 4).extract(py)?,
        parent_rev_1: tuple.get_item(py, 5).extract(py)?,
        parent_rev_2: tuple.get_item(py, 6).extract(py)?,
        node_id,
        ..Default::default()
    })
}
555 564 fn revision_data_params_to_py_tuple(
556 565 py: Python,
557 566 params: RevisionDataParams,
558 567 ) -> PyTuple {
559 568 PyTuple::new(
560 569 py,
561 570 &[
562 571 params.data_offset.into_py_object(py).into_object(),
563 572 params
564 573 .data_compressed_length
565 574 .into_py_object(py)
566 575 .into_object(),
567 576 params
568 577 .data_uncompressed_length
569 578 .into_py_object(py)
570 579 .into_object(),
571 580 params.data_delta_base.into_py_object(py).into_object(),
572 581 params.link_rev.into_py_object(py).into_object(),
573 582 params.parent_rev_1.into_py_object(py).into_object(),
574 583 params.parent_rev_2.into_py_object(py).into_object(),
575 584 PyBytes::new(py, &params.node_id)
576 585 .into_py_object(py)
577 586 .into_object(),
578 587 params._sidedata_offset.into_py_object(py).into_object(),
579 588 params
580 589 ._sidedata_compressed_length
581 590 .into_py_object(py)
582 591 .into_object(),
583 592 params
584 593 .data_compression_mode
585 594 .into_py_object(py)
586 595 .into_object(),
587 596 params
588 597 ._sidedata_compression_mode
589 598 .into_py_object(py)
590 599 .into_object(),
591 600 params._rank.into_py_object(py).into_object(),
592 601 ],
593 602 )
594 603 }
595 604
/// Adapter letting hg-core's `find_snapshots` populate a Python `dict`
/// (mapping a revision to the set of snapshots based on it) through the
/// `SnapshotsCache` trait.
struct PySnapshotsCache<'p> {
    py: Python<'p>,
    dict: PyDict,
}
600 609
impl<'p> SnapshotsCache for PySnapshotsCache<'p> {
    /// Add `value` to the Python set stored under key `rev`, creating the
    /// set on first insertion. Any Python-level failure is folded into a
    /// single opaque `RevlogError`.
    fn insert_for(
        &mut self,
        rev: BaseRevision,
        value: BaseRevision,
    ) -> Result<(), RevlogError> {
        let pyvalue = value.into_py_object(self.py).into_object();
        match self.dict.get_item(self.py, rev) {
            Some(obj) => obj
                .extract::<PySet>(self.py)
                .and_then(|set| set.add(self.py, pyvalue)),
            None => PySet::new(self.py, vec![pyvalue])
                .and_then(|set| self.dict.set_item(self.py, rev, set)),
        }
        .map_err(|_| {
            RevlogError::Other(HgError::unsupported(
                "Error in Python caches handling",
            ))
        })
    }
}
622 631
623 632 impl Index {
    /// Build an `Index` from a Python buffer (usually an mmap of the index
    /// file) and a default header to use when the data has none.
    ///
    /// The buffer itself is stored in the `index_mmap` slot so the borrowed
    /// bytes stay valid for the lifetime of the instance.
    fn new(py: Python, data: PyObject, header: u32) -> PyResult<Self> {
        // Safety: we keep the buffer around inside the class as `index_mmap`
        let (buf, bytes) = unsafe { mmap_keeparound(py, data)? };

        Self::create_instance(
            py,
            hg::index::Index::new(
                bytes,
                IndexHeader::parse(&header.to_be_bytes())
                    .expect("default header is broken")
                    .unwrap(),
            )
            .map_err(|e| {
                revlog_error_with_msg(py, e.to_string().as_bytes())
            })?,
            RefCell::new(None),
            RefCell::new(None),
            RefCell::new(None),
            RefCell::new(Some(buf)),
            RefCell::new(None),
            RefCell::new(None),
        )
    }
647 656
648 657 fn len(&self, py: Python) -> PyResult<usize> {
649 658 let rust_index_len = self.index(py).borrow().len();
650 659 Ok(rust_index_len)
651 660 }
652 661
    /// This is scaffolding at this point, but it could also become
    /// a way to start a persistent nodemap or perform a
    /// vacuum / repack operation
    ///
    /// Inserts every revision of the index into `nt`, converting any
    /// nodemap error into its Python equivalent.
    fn fill_nodemap(
        &self,
        py: Python,
        nt: &mut CoreNodeTree,
    ) -> PyResult<PyObject> {
        let index = self.index(py).borrow();
        for r in 0..self.len(py)? {
            let rev = Revision(r as BaseRevision);
            // in this case node() won't ever return None
            nt.insert(&*index, index.node(rev).unwrap(), rev)
                .map_err(|e| nodemap_error(py, e))?
        }
        Ok(py.None())
    }
670 679
    /// Return the nodemap, building and filling it on first access.
    ///
    /// After this returns `Ok`, `self.nt(py)` is guaranteed to hold `Some`,
    /// which is why callers use `.as_ref().unwrap()` on the borrow.
    fn get_nodetree<'a>(
        &'a self,
        py: Python<'a>,
    ) -> PyResult<&'a RefCell<Option<CoreNodeTree>>> {
        if self.nt(py).borrow().is_none() {
            let readonly = Box::<Vec<_>>::default();
            let mut nt = CoreNodeTree::load_bytes(readonly, 0);
            self.fill_nodemap(py, &mut nt)?;
            self.nt(py).borrow_mut().replace(nt);
        }
        Ok(self.nt(py))
    }
683 692
    /// Returns the full nodemap bytes to be written as-is to disk
    ///
    /// Note: this consumes the in-memory nodemap (`take()`), so the next
    /// access through `get_nodetree` will rebuild it.
    fn inner_nodemap_data_all(&self, py: Python) -> PyResult<PyBytes> {
        let nodemap = self.get_nodetree(py)?.borrow_mut().take().unwrap();
        let (readonly, bytes) = nodemap.into_readonly_and_added_bytes();

        // If there's anything readonly, we need to build the data again from
        // scratch
        let bytes = if readonly.len() > 0 {
            let mut nt = CoreNodeTree::load_bytes(Box::<Vec<_>>::default(), 0);
            self.fill_nodemap(py, &mut nt)?;

            let (readonly, bytes) = nt.into_readonly_and_added_bytes();
            assert_eq!(readonly.len(), 0);

            bytes
        } else {
            bytes
        };

        let bytes = PyBytes::new(py, &bytes);
        Ok(bytes)
    }
706 715
    /// Returns the last saved docket along with the size of any changed data
    /// (in number of blocks), and said data as bytes.
    ///
    /// Returns `None` when no docket has been set yet (nothing to be
    /// incremental against). Like `inner_nodemap_data_all`, this consumes
    /// the in-memory nodemap.
    fn inner_nodemap_data_incremental(
        &self,
        py: Python,
    ) -> PyResult<PyObject> {
        let docket = self.docket(py).borrow();
        let docket = match docket.as_ref() {
            Some(d) => d,
            None => return Ok(py.None()),
        };

        let node_tree = self.get_nodetree(py)?.borrow_mut().take().unwrap();
        let masked_blocks = node_tree.masked_readonly_blocks();
        let (_, data) = node_tree.into_readonly_and_added_bytes();
        // changed size is expressed in bytes: blocks * block size
        let changed = masked_blocks * std::mem::size_of::<Block>();

        Ok((docket, changed, PyBytes::new(py, &data))
            .to_py_object(py)
            .into_object())
    }
728 737
    /// Update the nodemap from the new (mmaped) data.
    /// The docket is kept as a reference for later incremental calls.
    ///
    /// Revisions added to the index after the persisted nodemap's tip are
    /// inserted on top of the loaded data so the nodemap covers the whole
    /// index again.
    fn inner_update_nodemap_data(
        &self,
        py: Python,
        docket: PyObject,
        nm_data: PyObject,
    ) -> PyResult<PyObject> {
        // Safety: we keep the buffer around inside the class as `nodemap_mmap`
        let (buf, bytes) = unsafe { mmap_keeparound(py, nm_data)? };
        let len = buf.item_count();
        self.nodemap_mmap(py).borrow_mut().replace(buf);

        let mut nt = CoreNodeTree::load_bytes(bytes, len);

        let data_tip = docket
            .getattr(py, "tip_rev")?
            .extract::<BaseRevision>(py)?
            .into();
        self.docket(py).borrow_mut().replace(docket.clone_ref(py));
        let idx = self.index(py).borrow();
        let data_tip = idx.check_revision(data_tip).ok_or_else(|| {
            nodemap_error(py, NodeMapError::RevisionNotInIndex(data_tip))
        })?;
        let current_tip = idx.len();

        // catch the nodemap up with revisions newer than the persisted tip
        for r in (data_tip.0 + 1)..current_tip as BaseRevision {
            let rev = Revision(r);
            // in this case node() won't ever return None
            nt.insert(&*idx, idx.node(rev).unwrap(), rev)
                .map_err(|e| nodemap_error(py, e))?
        }

        *self.nt(py).borrow_mut() = Some(nt);

        Ok(py.None())
    }
766 775
    /// `__getitem__` implementation: integer keys return the entry tuple,
    /// `bytes` keys are treated as node ids and return the revision (or
    /// `None` when absent). `NULL_REVISION` maps to the default entry.
    fn inner_getitem(&self, py: Python, key: PyObject) -> PyResult<PyObject> {
        let idx = self.index(py).borrow();
        Ok(match key.extract::<BaseRevision>(py) {
            Ok(key_as_int) => {
                let entry_params = if key_as_int == NULL_REVISION.0 {
                    RevisionDataParams::default()
                } else {
                    let rev = UncheckedRevision(key_as_int);
                    match idx.entry_as_params(rev) {
                        Some(e) => e,
                        None => {
                            return Err(PyErr::new::<IndexError, _>(
                                py,
                                "revlog index out of range",
                            ));
                        }
                    }
                };
                revision_data_params_to_py_tuple(py, entry_params)
                    .into_object()
            }
            _ => self.get_rev(py, key.extract::<PyBytes>(py)?)?.map_or_else(
                || py.None(),
                |py_rev| py_rev.into_py_object(py).into_object(),
            ),
        })
    }
794 803
795 804 fn inner_head_node_ids(&self, py: Python) -> PyResult<PyObject> {
796 805 let index = &*self.index(py).borrow();
797 806
798 807 // We don't use the shortcut here, as it's actually slower to loop
799 808 // through the cached `PyList` than to re-do the whole computation for
800 809 // large lists, which are the performance sensitive ones anyway.
801 810 let head_revs = index.head_revs().map_err(|e| graph_error(py, e))?;
802 811 let res: Vec<_> = head_revs
803 812 .iter()
804 813 .map(|r| {
805 814 PyBytes::new(
806 815 py,
807 816 index
808 817 .node(*r)
809 818 .expect("rev should have been in the index")
810 819 .as_bytes(),
811 820 )
812 821 .into_object()
813 822 })
814 823 .collect();
815 824
816 825 self.cache_new_heads_py_list(&head_revs, py);
817 826 self.cache_new_heads_node_ids_py_list(&head_revs, py);
818 827
819 828 Ok(PyList::new(py, &res).into_object())
820 829 }
821 830
    /// Return the (unfiltered) head revisions, using the core index's
    /// shortcut when its cache is still valid and refreshing the Python-side
    /// cached list whenever the heads were recomputed.
    fn inner_headrevs(&self, py: Python) -> PyResult<PyObject> {
        let index = &*self.index(py).borrow();
        // `head_revs_shortcut` returns `Some` only when heads were recomputed
        if let Some(new_heads) =
            index.head_revs_shortcut().map_err(|e| graph_error(py, e))?
        {
            self.cache_new_heads_py_list(&new_heads, py);
        }

        // hand out a new reference to the cached list
        Ok(self
            .head_revs_py_list(py)
            .borrow()
            .as_ref()
            .expect("head revs should be cached")
            .clone_ref(py)
            .into_object())
    }
838 847
839 848 fn check_revision(
840 849 index: &hg::index::Index,
841 850 rev: UncheckedRevision,
842 851 py: Python,
843 852 ) -> PyResult<Revision> {
844 853 index
845 854 .check_revision(rev)
846 855 .ok_or_else(|| rev_not_in_index(py, rev))
847 856 }
848 857
    /// Compute the `(removed, added)` head revisions between two index
    /// lengths `begin` and `end` (exclusive bounds, hence the `- 1` below
    /// to convert them into the last revision of each state).
    fn inner_headrevsdiff(
        &self,
        py: Python,
        begin: &PyObject,
        end: &PyObject,
    ) -> PyResult<PyObject> {
        let begin = begin.extract::<BaseRevision>(py)?;
        let end = end.extract::<BaseRevision>(py)?;
        let index = &*self.index(py).borrow();
        let begin =
            Self::check_revision(index, UncheckedRevision(begin - 1), py)?;
        let end = Self::check_revision(index, UncheckedRevision(end - 1), py)?;
        let (removed, added) = index
            .head_revs_diff(begin, end)
            .map_err(|e| graph_error(py, e))?;
        let removed: Vec<_> =
            removed.into_iter().map(PyRevision::from).collect();
        let added: Vec<_> = added.into_iter().map(PyRevision::from).collect();
        let res = (removed, added).to_py_object(py).into_object();
        Ok(res)
    }
870 879
    /// Return the head revisions of the index with `filtered_revs` (a Python
    /// iterable of revisions) excluded, refreshing the Python-side cached
    /// list when the heads were recomputed.
    fn inner_headrevsfiltered(
        &self,
        py: Python,
        filtered_revs: &PyObject,
    ) -> PyResult<PyObject> {
        let index = &*self.index(py).borrow();
        let filtered_revs = rev_pyiter_collect(py, filtered_revs, index)?;

        // `Some` only when the filtered heads had to be recomputed
        if let Some(new_heads) = index
            .head_revs_filtered(&filtered_revs, true)
            .map_err(|e| graph_error(py, e))?
        {
            self.cache_new_heads_py_list(&new_heads, py);
        }

        // hand out a new reference to the cached list
        Ok(self
            .head_revs_py_list(py)
            .borrow()
            .as_ref()
            .expect("head revs should be cached")
            .clone_ref(py)
            .into_object())
    }
894 903
895 904 fn cache_new_heads_node_ids_py_list(
896 905 &self,
897 906 new_heads: &[Revision],
898 907 py: Python<'_>,
899 908 ) -> PyList {
900 909 let index = self.index(py).borrow();
901 910 let as_vec: Vec<PyObject> = new_heads
902 911 .iter()
903 912 .map(|r| {
904 913 PyBytes::new(
905 914 py,
906 915 index
907 916 .node(*r)
908 917 .expect("rev should have been in the index")
909 918 .as_bytes(),
910 919 )
911 920 .into_object()
912 921 })
913 922 .collect();
914 923 let new_heads_py_list = PyList::new(py, &as_vec);
915 924 *self.head_node_ids_py_list(py).borrow_mut() =
916 925 Some(new_heads_py_list.clone_ref(py));
917 926 new_heads_py_list
918 927 }
919 928
920 929 fn cache_new_heads_py_list(
921 930 &self,
922 931 new_heads: &[Revision],
923 932 py: Python<'_>,
924 933 ) -> PyList {
925 934 let as_vec: Vec<PyObject> = new_heads
926 935 .iter()
927 936 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
928 937 .collect();
929 938 let new_heads_py_list = PyList::new(py, &as_vec);
930 939 *self.head_revs_py_list(py).borrow_mut() =
931 940 Some(new_heads_py_list.clone_ref(py));
932 941 new_heads_py_list
933 942 }
934 943
935 944 fn inner_ancestors(
936 945 &self,
937 946 py: Python,
938 947 py_revs: &PyTuple,
939 948 ) -> PyResult<PyObject> {
940 949 let index = &*self.index(py).borrow();
941 950 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
942 951 let as_vec: Vec<_> = index
943 952 .ancestors(&revs)
944 953 .map_err(|e| graph_error(py, e))?
945 954 .iter()
946 955 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
947 956 .collect();
948 957 Ok(PyList::new(py, &as_vec).into_object())
949 958 }
950 959
951 960 fn inner_commonancestorsheads(
952 961 &self,
953 962 py: Python,
954 963 py_revs: &PyTuple,
955 964 ) -> PyResult<PyObject> {
956 965 let index = &*self.index(py).borrow();
957 966 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
958 967 let as_vec: Vec<_> = index
959 968 .common_ancestor_heads(&revs)
960 969 .map_err(|e| graph_error(py, e))?
961 970 .iter()
962 971 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
963 972 .collect();
964 973 Ok(PyList::new(py, &as_vec).into_object())
965 974 }
966 975
967 976 fn inner_computephasesmapsets(
968 977 &self,
969 978 py: Python,
970 979 py_roots: PyDict,
971 980 ) -> PyResult<PyObject> {
972 981 let index = &*self.index(py).borrow();
973 982 let roots: Result<HashMap<Phase, Vec<Revision>>, PyErr> = py_roots
974 983 .items_list(py)
975 984 .iter(py)
976 985 .map(|r| {
977 986 let phase = r.get_item(py, 0)?;
978 987 let revs: Vec<_> =
979 988 rev_pyiter_collect(py, &r.get_item(py, 1)?, index)?;
980 989 let phase = Phase::try_from(phase.extract::<usize>(py)?)
981 990 .map_err(|_| revlog_error(py));
982 991 Ok((phase?, revs))
983 992 })
984 993 .collect();
985 994 let (len, phase_maps) = index
986 995 .compute_phases_map_sets(roots?)
987 996 .map_err(|e| graph_error(py, e))?;
988 997
989 998 // Ugly hack, but temporary
990 999 const IDX_TO_PHASE_NUM: [usize; 4] = [1, 2, 32, 96];
991 1000 let py_phase_maps = PyDict::new(py);
992 1001 for (idx, roots) in phase_maps.into_iter().enumerate() {
993 1002 let phase_num = IDX_TO_PHASE_NUM[idx].into_py_object(py);
994 1003 // This is a bit faster than collecting into a `Vec` and passing
995 1004 // it to `PySet::new`.
996 1005 let set = PySet::empty(py)?;
997 1006 for rev in roots {
998 1007 set.add(py, PyRevision::from(rev).into_py_object(py))?;
999 1008 }
1000 1009 py_phase_maps.set_item(py, phase_num, set)?;
1001 1010 }
1002 1011 Ok(PyTuple::new(
1003 1012 py,
1004 1013 &[
1005 1014 len.into_py_object(py).into_object(),
1006 1015 py_phase_maps.into_object(),
1007 1016 ],
1008 1017 )
1009 1018 .into_object())
1010 1019 }
1011 1020
1012 1021 fn inner_slicechunktodensity(
1013 1022 &self,
1014 1023 py: Python,
1015 1024 revs: PyObject,
1016 1025 target_density: f64,
1017 1026 min_gap_size: usize,
1018 1027 ) -> PyResult<PyObject> {
1019 1028 let index = &*self.index(py).borrow();
1020 1029 let revs: Vec<_> = rev_pyiter_collect(py, &revs, index)?;
1021 1030 let as_nested_vec =
1022 1031 index.slice_chunk_to_density(&revs, target_density, min_gap_size);
1023 1032 let mut res = Vec::with_capacity(as_nested_vec.len());
1024 1033 let mut py_chunk = Vec::new();
1025 1034 for chunk in as_nested_vec {
1026 1035 py_chunk.clear();
1027 1036 py_chunk.reserve_exact(chunk.len());
1028 1037 for rev in chunk {
1029 1038 py_chunk.push(
1030 1039 PyRevision::from(rev).into_py_object(py).into_object(),
1031 1040 );
1032 1041 }
1033 1042 res.push(PyList::new(py, &py_chunk).into_object());
1034 1043 }
1035 1044 // This is just to do the same as C, not sure why it does this
1036 1045 if res.len() == 1 {
1037 1046 Ok(PyTuple::new(py, &res).into_object())
1038 1047 } else {
1039 1048 Ok(PyList::new(py, &res).into_object())
1040 1049 }
1041 1050 }
1042 1051
1043 1052 fn inner_reachableroots2(
1044 1053 &self,
1045 1054 py: Python,
1046 1055 min_root: UncheckedRevision,
1047 1056 heads: PyObject,
1048 1057 roots: PyObject,
1049 1058 include_path: bool,
1050 1059 ) -> PyResult<PyObject> {
1051 1060 let index = &*self.index(py).borrow();
1052 1061 let heads = rev_pyiter_collect_or_else(py, &heads, index, |_rev| {
1053 1062 PyErr::new::<IndexError, _>(py, "head out of range")
1054 1063 })?;
1055 1064 let roots: Result<_, _> = roots
1056 1065 .iter(py)?
1057 1066 .map(|r| {
1058 1067 r.and_then(|o| match o.extract::<PyRevision>(py) {
1059 1068 Ok(r) => Ok(UncheckedRevision(r.0)),
1060 1069 Err(e) => Err(e),
1061 1070 })
1062 1071 })
1063 1072 .collect();
1064 1073 let as_set = index
1065 1074 .reachable_roots(min_root, heads, roots?, include_path)
1066 1075 .map_err(|e| graph_error(py, e))?;
1067 1076 let as_vec: Vec<PyObject> = as_set
1068 1077 .iter()
1069 1078 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
1070 1079 .collect();
1071 1080 Ok(PyList::new(py, &as_vec).into_object())
1072 1081 }
1073 1082 }
1074 1083
/// Python wrapper around an in-RAM `CoreNodeTree`, bound to a Rust index.
///
/// The index is held as an `UnsafePyLeaked` reference and re-borrowed
/// (hence re-validated) on every use; see `is_invalidated` below.
py_class!(pub class NodeTree |py| {
    // The nodemap proper (node prefix -> revision), fully mutable in RAM.
    data nt: RefCell<CoreNodeTree>;
    // Leaked shared reference to the Rust index backing this tree.
    data index: RefCell<UnsafePyLeaked<PySharedIndex>>;

    def __new__(_cls, index: PyObject) -> PyResult<NodeTree> {
        let index = py_rust_index_to_graph(py, index)?;
        let nt = CoreNodeTree::default(); // in-RAM, fully mutable
        Self::create_instance(py, RefCell::new(nt), RefCell::new(index))
    }

    /// Tell whether the NodeTree is still valid
    ///
    /// In case of mutation of the index, the given results are not
    /// guaranteed to be correct, and in fact, the methods borrowing
    /// the inner index would fail because of `PySharedRef` poisoning
    /// (generation-based guard), same as iterating on a `dict` that has
    /// been meanwhile mutated.
    def is_invalidated(&self) -> PyResult<bool> {
        let leaked = self.index(py).borrow();
        // Safety: we don't leak the "faked" reference out of `UnsafePyLeaked`
        let result = unsafe { leaked.try_borrow(py) };
        // two cases for result to be an error:
        // - the index has previously been mutably borrowed
        // - there is currently a mutable borrow
        // in both cases this amounts for previous results related to
        // the index to still be valid.
        Ok(result.is_err())
    }

    /// Insert the node of revision `rev` into the tree.
    ///
    /// Raises ValueError when `rev` is not in the index, or is the null
    /// revision (which has no node to insert).
    def insert(&self, rev: PyRevision) -> PyResult<PyObject> {
        let leaked = self.index(py).borrow();
        // Safety: we don't leak the "faked" reference out of `UnsafePyLeaked`
        let index = &*unsafe { leaked.try_borrow(py)? };

        let rev = UncheckedRevision(rev.0);
        let rev = index
            .check_revision(rev)
            .ok_or_else(|| rev_not_in_index(py, rev))?;
        // The null revision is in every index but has no real entry.
        if rev == NULL_REVISION {
            return Err(rev_not_in_index(py, rev.into()))
        }

        let entry = index.inner.get_entry(rev).unwrap();
        let mut nt = self.nt(py).borrow_mut();
        nt.insert(index, entry.hash(), rev).map_err(|e| nodemap_error(py, e))?;

        Ok(py.None())
    }

    /// Lookup by node hex prefix in the NodeTree, returning revision number.
    ///
    /// This is not part of the classical NodeTree API, but is good enough
    /// for unit testing, as in `test-rust-revlog.py`.
    def prefix_rev_lookup(
        &self,
        node_prefix: PyBytes
    ) -> PyResult<Option<PyRevision>> {
        let prefix = NodePrefix::from_hex(node_prefix.data(py))
            .map_err(|_| PyErr::new::<ValueError, _>(
                py,
                format!("Invalid node or prefix {:?}",
                        node_prefix.as_object()))
            )?;

        let nt = self.nt(py).borrow();
        let leaked = self.index(py).borrow();
        // Safety: we don't leak the "faked" reference out of `UnsafePyLeaked`
        let index = &*unsafe { leaked.try_borrow(py)? };

        Ok(nt.find_bin(index, prefix)
               .map_err(|e| nodemap_error(py, e))?
               .map(|r| r.into())
        )
    }

    /// Return the length of the shortest hex prefix of `node` that is
    /// unique in the index (per `unique_prefix_len_node`).
    ///
    /// Raises RevlogError when the node is not found.
    def shortest(&self, node: PyBytes) -> PyResult<usize> {
        let nt = self.nt(py).borrow();
        let leaked = self.index(py).borrow();
        // Safety: we don't leak the "faked" reference out of `UnsafePyLeaked`
        let idx = &*unsafe { leaked.try_borrow(py)? };
        match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
        {
            Ok(Some(l)) => Ok(l),
            Ok(None) => Err(revlog_error(py)),
            Err(e) => Err(nodemap_error(py, e)),
        }
    }
});
1163 1172
1164 1173 fn revlog_error(py: Python) -> PyErr {
1165 1174 match py
1166 1175 .import("mercurial.error")
1167 1176 .and_then(|m| m.get(py, "RevlogError"))
1168 1177 {
1169 1178 Err(e) => e,
1170 1179 Ok(cls) => PyErr::from_instance(
1171 1180 py,
1172 1181 cls.call(py, (py.None(),), None).ok().into_py_object(py),
1173 1182 ),
1174 1183 }
1175 1184 }
1176 1185
1177 1186 fn revlog_error_with_msg(py: Python, msg: &[u8]) -> PyErr {
1178 1187 match py
1179 1188 .import("mercurial.error")
1180 1189 .and_then(|m| m.get(py, "RevlogError"))
1181 1190 {
1182 1191 Err(e) => e,
1183 1192 Ok(cls) => PyErr::from_instance(
1184 1193 py,
1185 1194 cls.call(py, (PyBytes::new(py, msg),), None)
1186 1195 .ok()
1187 1196 .into_py_object(py),
1188 1197 ),
1189 1198 }
1190 1199 }
1191 1200
1192 1201 fn graph_error(py: Python, _err: hg::GraphError) -> PyErr {
1193 1202 // ParentOutOfRange is currently the only alternative
1194 1203 // in `hg::GraphError`. The C index always raises this simple ValueError.
1195 1204 PyErr::new::<ValueError, _>(py, "parent out of range")
1196 1205 }
1197 1206
1198 1207 fn nodemap_rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1199 1208 PyErr::new::<ValueError, _>(
1200 1209 py,
1201 1210 format!(
1202 1211 "Inconsistency: Revision {} found in nodemap \
1203 1212 is not in revlog index",
1204 1213 rev
1205 1214 ),
1206 1215 )
1207 1216 }
1208 1217
1209 1218 fn rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1210 1219 PyErr::new::<ValueError, _>(
1211 1220 py,
1212 1221 format!("revlog index out of range: {}", rev),
1213 1222 )
1214 1223 }
1215 1224
1216 1225 /// Standard treatment of NodeMapError
1217 1226 fn nodemap_error(py: Python, err: NodeMapError) -> PyErr {
1218 1227 match err {
1219 1228 NodeMapError::MultipleResults => revlog_error(py),
1220 1229 NodeMapError::RevisionNotInIndex(r) => nodemap_rev_not_in_index(py, r),
1221 1230 }
1222 1231 }
1223 1232
1224 1233 /// Create the module, with __package__ given from parent
1225 1234 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
1226 1235 let dotted_name = &format!("{}.revlog", package);
1227 1236 let m = PyModule::new(py, dotted_name)?;
1228 1237 m.add(py, "__package__", package)?;
1229 1238 m.add(py, "__doc__", "RevLog - Rust implementations")?;
1230 1239
1231 1240 m.add_class::<Index>(py)?;
1232 1241 m.add_class::<NodeTree>(py)?;
1233 1242
1234 1243 let sys = PyModule::import(py, "sys")?;
1235 1244 let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
1236 1245 sys_modules.set_item(py, dotted_name, &m)?;
1237 1246
1238 1247 Ok(m)
1239 1248 }
General Comments 0
You need to be logged in to leave comments. Login now