rust-revlog: bare minimal NodeTree exposition...
Georges Racinet
r52142:2966b88d default
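
This changeset exposes a bare-minimum `NodeTree` class to Python on top of the Rust `CoreNodeTree`, next to the existing `MixedIndex` bindings. A rough usage sketch, assuming a built `mercurial.rustext` module and an already-constructed Rust index (mirroring the unit test added at the bottom of this changeset):

```python
# Sketch only; `rust_index` stands for a revlog.MixedIndex instance,
# e.g. from revlogtesting.RustRevlogBasedTestBase.parserustindex().
from mercurial.rustext import revlog

def shortest_prefix_lengths(rust_index):
    """Map each revision to the length of its shortest unambiguous prefix."""
    nt = revlog.NodeTree(rust_index)    # standalone, fully in-RAM nodetree
    for rev in range(len(rust_index)):
        nt.insert(rev)                  # register rev under its node hash
    # entry[7] is the binary node id in the index tuple layout
    return {rev: nt.shortest(entry[7])
            for rev, entry in enumerate(rust_index)}
```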
rust/hg-cpython/src/revlog.rs
@@ -1,1042 +1,1110 @@
1 1 // revlog.rs
2 2 //
3 3 // Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net>
4 4 //
5 5 // This software may be used and distributed according to the terms of the
6 6 // GNU General Public License version 2 or any later version.
7 7
8 8 use crate::{
9 9 cindex,
10 10 conversion::{rev_pyiter_collect, rev_pyiter_collect_or_else},
11 11 utils::{node_from_py_bytes, node_from_py_object},
12 12 PyRevision,
13 13 };
14 14 use cpython::{
15 15 buffer::{Element, PyBuffer},
16 16 exc::{IndexError, ValueError},
17 17 ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyInt, PyList,
18 18 PyModule, PyObject, PyResult, PySet, PyString, PyTuple, Python,
19 19 PythonObject, ToPyObject, UnsafePyLeaked,
20 20 };
21 21 use hg::{
22 22 errors::HgError,
23 23 index::{
24 24 IndexHeader, Phase, RevisionDataParams, SnapshotsCache,
25 25 INDEX_ENTRY_SIZE,
26 26 },
27 27 nodemap::{Block, NodeMapError, NodeTree as CoreNodeTree},
28 28 revlog::{nodemap::NodeMap, Graph, NodePrefix, RevlogError, RevlogIndex},
29 29 BaseRevision, Node, Revision, UncheckedRevision, NULL_REVISION,
30 30 };
31 31 use std::{cell::RefCell, collections::HashMap};
32 32 use vcsgraph::graph::Graph as VCSGraph;
33 33
34 34 pub struct PySharedIndex {
35 35 /// The underlying hg-core index
36 36 pub(crate) inner: &'static hg::index::Index,
37 37 }
38 38
39 39 /// Return a Struct implementing the Graph trait
40 40 pub(crate) fn py_rust_index_to_graph(
41 41 py: Python,
42 42 index: PyObject,
43 43 ) -> PyResult<UnsafePyLeaked<PySharedIndex>> {
44 44 let midx = index.extract::<MixedIndex>(py)?;
45 45 let leaked = midx.index(py).leak_immutable();
46 46 Ok(unsafe { leaked.map(py, |idx| PySharedIndex { inner: idx }) })
47 47 }
48 48
49 49 impl Clone for PySharedIndex {
50 50 fn clone(&self) -> Self {
51 51 Self { inner: self.inner }
52 52 }
53 53 }
54 54
55 55 impl Graph for PySharedIndex {
56 56 fn parents(&self, rev: Revision) -> Result<[Revision; 2], hg::GraphError> {
57 57 self.inner.parents(rev)
58 58 }
59 59 }
60 60
61 61 impl VCSGraph for PySharedIndex {
62 62 fn parents(
63 63 &self,
64 64 rev: BaseRevision,
65 65 ) -> Result<vcsgraph::graph::Parents, vcsgraph::graph::GraphReadError>
66 66 {
67 67 // FIXME This trait should be reworked to decide between Revision
68 68 // and UncheckedRevision, get better error names, etc.
69 69 match Graph::parents(self, Revision(rev)) {
70 70 Ok(parents) => {
71 71 Ok(vcsgraph::graph::Parents([parents[0].0, parents[1].0]))
72 72 }
73 73 Err(hg::GraphError::ParentOutOfRange(rev)) => {
74 74 Err(vcsgraph::graph::GraphReadError::KeyedInvalidKey(rev.0))
75 75 }
76 76 }
77 77 }
78 78 }
79 79
80 80 impl RevlogIndex for PySharedIndex {
81 81 fn len(&self) -> usize {
82 82 self.inner.len()
83 83 }
84 84 fn node(&self, rev: Revision) -> Option<&Node> {
85 85 self.inner.node(rev)
86 86 }
87 87 }
88 88
89 89 py_class!(pub class MixedIndex |py| {
90 90 data cindex: RefCell<cindex::Index>;
91 91 @shared data index: hg::index::Index;
92 92 data nt: RefCell<Option<CoreNodeTree>>;
93 93 data docket: RefCell<Option<PyObject>>;
94 94 // Holds a reference to the mmap'ed persistent nodemap data
95 95 data nodemap_mmap: RefCell<Option<PyBuffer>>;
96 96 // Holds a reference to the mmap'ed persistent index data
97 97 data index_mmap: RefCell<Option<PyBuffer>>;
98 98
99 99 def __new__(
100 100 _cls,
101 101 cindex: PyObject,
102 102 data: PyObject,
103 103 default_header: u32,
104 104 ) -> PyResult<MixedIndex> {
105 105 Self::new(py, cindex, data, default_header)
106 106 }
107 107
108 108 /// Compatibility layer used for Python consumers needing access to the C index
109 109 ///
110 110 /// Only use case so far is `scmutil.shortesthexnodeidprefix`,
111 111 /// that may need to build a custom `nodetree`, based on a specified revset.
112 112 /// With a Rust implementation of the nodemap, we will be able to get rid of
113 113 /// this, by exposing our own standalone nodemap class,
114 114 /// ready to accept `MixedIndex`.
115 115 def get_cindex(&self) -> PyResult<PyObject> {
116 116 Ok(self.cindex(py).borrow().inner().clone_ref(py))
117 117 }
118 118
119 119 // Index API involving nodemap, as defined in mercurial/pure/parsers.py
120 120
121 121 /// Return Revision if found, raises a bare `error.RevlogError`
122 122 /// in case of ambiguity, same as C version does
123 123 def get_rev(&self, node: PyBytes) -> PyResult<Option<PyRevision>> {
124 124 let opt = self.get_nodetree(py)?.borrow();
125 125 let nt = opt.as_ref().unwrap();
126 126 let ridx = &*self.index(py).borrow();
127 127 let node = node_from_py_bytes(py, &node)?;
128 128 let rust_rev =
129 129 nt.find_bin(ridx, node.into()).map_err(|e| nodemap_error(py, e))?;
130 130 Ok(rust_rev.map(Into::into))
131 131
132 132 }
133 133
134 134 /// same as `get_rev()` but raises a bare `error.RevlogError` if node
135 135 /// is not found.
136 136 ///
137 137 /// No need to repeat `node` in the exception, `mercurial/revlog.py`
138 138 /// will catch and rewrap with it
139 139 def rev(&self, node: PyBytes) -> PyResult<PyRevision> {
140 140 self.get_rev(py, node)?.ok_or_else(|| revlog_error(py))
141 141 }
142 142
144 144 /// return True if the node exists in the index
144 144 def has_node(&self, node: PyBytes) -> PyResult<bool> {
145 145 // TODO OPTIM we could avoid a needless conversion here,
146 146 // to be done when the scaffolding for the pure Rust switch is removed,
147 147 // as `get_rev()` currently does the necessary assertions
148 148 self.get_rev(py, node).map(|opt| opt.is_some())
149 149 }
150 150
151 151 /// find length of shortest hex nodeid of a binary ID
152 152 def shortest(&self, node: PyBytes) -> PyResult<usize> {
153 153 let opt = self.get_nodetree(py)?.borrow();
154 154 let nt = opt.as_ref().unwrap();
155 155 let idx = &*self.index(py).borrow();
156 156 match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
157 157 {
158 158 Ok(Some(l)) => Ok(l),
159 159 Ok(None) => Err(revlog_error(py)),
160 160 Err(e) => Err(nodemap_error(py, e)),
161 161 }
162 162 }
163 163
164 164 def partialmatch(&self, node: PyObject) -> PyResult<Option<PyBytes>> {
165 165 let opt = self.get_nodetree(py)?.borrow();
166 166 let nt = opt.as_ref().unwrap();
167 167 let idx = &*self.index(py).borrow();
168 168
169 169 let node_as_string = if cfg!(feature = "python3-sys") {
170 170 node.cast_as::<PyString>(py)?.to_string(py)?.to_string()
171 171 }
172 172 else {
173 173 let node = node.extract::<PyBytes>(py)?;
174 174 String::from_utf8_lossy(node.data(py)).to_string()
175 175 };
176 176
177 177 let prefix = NodePrefix::from_hex(&node_as_string)
178 178 .map_err(|_| PyErr::new::<ValueError, _>(
179 179 py, format!("Invalid node or prefix '{}'", node_as_string))
180 180 )?;
181 181
182 182 nt.find_bin(idx, prefix)
183 183 // TODO make an inner API returning the node directly
184 184 .map(|opt| opt.map(
185 185 |rev| PyBytes::new(py, idx.node(rev).unwrap().as_bytes())))
186 186 .map_err(|e| nodemap_error(py, e))
187 187
188 188 }
189 189
190 190 /// append an index entry
191 191 def append(&self, tup: PyTuple) -> PyResult<PyObject> {
192 192 if tup.len(py) < 8 {
193 193 // this is better than the panic promised by tup.get_item()
194 194 return Err(
195 195 PyErr::new::<IndexError, _>(py, "tuple index out of range"))
196 196 }
197 197 let node_bytes = tup.get_item(py, 7).extract(py)?;
198 198 let node = node_from_py_object(py, &node_bytes)?;
199 199
200 200 let rev = self.len(py)? as BaseRevision;
201 201
202 202 // This is ok since we will just add the revision to the index
203 203 let rev = Revision(rev);
204 204 self.index(py)
205 205 .borrow_mut()
206 206 .append(py_tuple_to_revision_data_params(py, tup)?)
207 207 .unwrap();
208 208 let idx = &*self.index(py).borrow();
209 209 self.get_nodetree(py)?.borrow_mut().as_mut().unwrap()
210 210 .insert(idx, &node, rev)
211 211 .map_err(|e| nodemap_error(py, e))?;
212 212 Ok(py.None())
213 213 }
214 214
215 215 def __delitem__(&self, key: PyObject) -> PyResult<()> {
216 216 // __delitem__ is both for `del idx[r]` and `del idx[r1:r2]`
217 217 let start = key.getattr(py, "start")?;
218 218 let start = UncheckedRevision(start.extract(py)?);
219 219 let start = self.index(py)
220 220 .borrow()
221 221 .check_revision(start)
222 222 .ok_or_else(|| {
223 223 nodemap_error(py, NodeMapError::RevisionNotInIndex(start))
224 224 })?;
225 225 self.index(py).borrow_mut().remove(start).unwrap();
226 226 let mut opt = self.get_nodetree(py)?.borrow_mut();
227 227 let nt = opt.as_mut().unwrap();
228 228 nt.invalidate_all();
229 229 self.fill_nodemap(py, nt)?;
230 230 Ok(())
231 231 }
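
Only the slice form of deletion is exercised by Mercurial (stripping truncates the index from a start revision), which is why the code above only reads `key.start`. A hedged sketch of the observable behavior:

```python
# Sketch: truncate a 4-revision index down to 2 revisions; the nodemap
# is invalidated and rebuilt via fill_nodemap() as a side effect.
del rustidx[2:]
assert len(rustidx) == 2
```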
232 232
233 233 //
234 234 // Index methods previously reforwarded to C index (tp_methods)
235 235 // Same ordering as in revlog.c
236 236 //
237 237
238 238 /// return the gca set of the given revs
239 239 def ancestors(&self, *args, **_kw) -> PyResult<PyObject> {
240 240 let rust_res = self.inner_ancestors(py, args)?;
241 241 Ok(rust_res)
242 242 }
243 243
244 244 /// return the heads of the common ancestors of the given revs
245 245 def commonancestorsheads(&self, *args, **_kw) -> PyResult<PyObject> {
246 246 let rust_res = self.inner_commonancestorsheads(py, args)?;
247 247 Ok(rust_res)
248 248 }
249 249
250 250 /// Clear the index caches and inner py_class data.
251 251 /// It is Python's responsibility to call `update_nodemap_data` again.
252 252 def clearcaches(&self) -> PyResult<PyObject> {
253 253 self.nt(py).borrow_mut().take();
254 254 self.docket(py).borrow_mut().take();
255 255 self.nodemap_mmap(py).borrow_mut().take();
256 256 self.index(py).borrow().clear_caches();
257 257 Ok(py.None())
258 258 }
259 259
260 260 /// return the raw binary string representing a revision
261 261 def entry_binary(&self, *args, **_kw) -> PyResult<PyObject> {
262 262 let rindex = self.index(py).borrow();
263 263 let rev = UncheckedRevision(args.get_item(py, 0).extract(py)?);
264 264 let rust_bytes = rindex.check_revision(rev).and_then(
265 265 |r| rindex.entry_binary(r))
266 266 .ok_or_else(|| rev_not_in_index(py, rev))?;
267 267 let rust_res = PyBytes::new(py, rust_bytes).into_object();
268 268 Ok(rust_res)
269 269 }
270 270
271 271 /// return a binary packed version of the header
272 272 def pack_header(&self, *args, **_kw) -> PyResult<PyObject> {
273 273 let rindex = self.index(py).borrow();
274 274 let packed = rindex.pack_header(args.get_item(py, 0).extract(py)?);
275 275 let rust_res = PyBytes::new(py, &packed).into_object();
276 276 Ok(rust_res)
277 277 }
278 278
279 279 /// compute phases
280 280 def computephasesmapsets(&self, *args, **_kw) -> PyResult<PyObject> {
281 281 let py_roots = args.get_item(py, 0).extract::<PyDict>(py)?;
282 282 let rust_res = self.inner_computephasesmapsets(py, py_roots)?;
283 283 Ok(rust_res)
284 284 }
285 285
286 286 /// reachableroots
287 287 def reachableroots2(&self, *args, **_kw) -> PyResult<PyObject> {
288 288 let rust_res = self.inner_reachableroots2(
289 289 py,
290 290 UncheckedRevision(args.get_item(py, 0).extract(py)?),
291 291 args.get_item(py, 1),
292 292 args.get_item(py, 2),
293 293 args.get_item(py, 3).extract(py)?,
294 294 )?;
295 295 Ok(rust_res)
296 296 }
297 297
298 298 /// get head revisions
299 299 def headrevs(&self) -> PyResult<PyObject> {
300 300 let rust_res = self.inner_headrevs(py)?;
301 301 Ok(rust_res)
302 302 }
303 303
304 304 /// get filtered head revisions
305 305 def headrevsfiltered(&self, *args, **_kw) -> PyResult<PyObject> {
306 306 let rust_res = self.inner_headrevsfiltered(py, &args.get_item(py, 0))?;
307 307 Ok(rust_res)
308 308 }
309 309
310 310 /// True if the object is a snapshot
311 311 def issnapshot(&self, *args, **_kw) -> PyResult<bool> {
312 312 let index = self.index(py).borrow();
313 313 let result = index
314 314 .is_snapshot(UncheckedRevision(args.get_item(py, 0).extract(py)?))
315 315 .map_err(|e| {
316 316 PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
317 317 })?;
318 318 Ok(result)
319 319 }
320 320
321 321 /// Gather snapshot data in a cache dict
322 322 def findsnapshots(&self, *args, **_kw) -> PyResult<PyObject> {
323 323 let index = self.index(py).borrow();
324 324 let cache: PyDict = args.get_item(py, 0).extract(py)?;
325 325 // this method operates by setting new values in the cache,
326 326 // hence we will compare results by letting the C implementation
327 327 // operate over a deepcopy of the cache, and finally compare both
328 328 // caches.
329 329 let c_cache = PyDict::new(py);
330 330 for (k, v) in cache.items(py) {
331 331 c_cache.set_item(py, k, PySet::new(py, v)?)?;
332 332 }
333 333
334 334 let start_rev = UncheckedRevision(args.get_item(py, 1).extract(py)?);
335 335 let end_rev = UncheckedRevision(args.get_item(py, 2).extract(py)?);
336 336 let mut cache_wrapper = PySnapshotsCache{ py, dict: cache };
337 337 index.find_snapshots(
338 338 start_rev,
339 339 end_rev,
340 340 &mut cache_wrapper,
341 341 ).map_err(|_| revlog_error(py))?;
342 342 Ok(py.None())
343 343 }
344 344
345 345 /// determine revisions with deltas to reconstruct fulltext
346 346 def deltachain(&self, *args, **_kw) -> PyResult<PyObject> {
347 347 let index = self.index(py).borrow();
348 348 let rev = args.get_item(py, 0).extract::<BaseRevision>(py)?.into();
349 349 let stop_rev =
350 350 args.get_item(py, 1).extract::<Option<BaseRevision>>(py)?;
351 351 let rev = index.check_revision(rev).ok_or_else(|| {
352 352 nodemap_error(py, NodeMapError::RevisionNotInIndex(rev))
353 353 })?;
354 354 let stop_rev = if let Some(stop_rev) = stop_rev {
355 355 let stop_rev = UncheckedRevision(stop_rev);
356 356 Some(index.check_revision(stop_rev).ok_or_else(|| {
357 357 nodemap_error(py, NodeMapError::RevisionNotInIndex(stop_rev))
358 358 })?)
359 359 } else {None};
360 360 let using_general_delta = args.get_item(py, 2)
361 361 .extract::<Option<u32>>(py)?
362 362 .map(|i| i != 0);
363 363 let (chain, stopped) = index.delta_chain(
364 364 rev, stop_rev, using_general_delta
365 365 ).map_err(|e| {
366 366 PyErr::new::<cpython::exc::ValueError, _>(py, e.to_string())
367 367 })?;
368 368
369 369 let chain: Vec<_> = chain.into_iter().map(|r| r.0).collect();
370 370 Ok(
371 371 PyTuple::new(
372 372 py,
373 373 &[
374 374 chain.into_py_object(py).into_object(),
375 375 stopped.into_py_object(py).into_object()
376 376 ]
377 377 ).into_object()
378 378 )
379 379
380 380 }
381 381
382 382 /// slice planned chunk read to reach a density threshold
383 383 def slicechunktodensity(&self, *args, **_kw) -> PyResult<PyObject> {
384 384 let rust_res = self.inner_slicechunktodensity(
385 385 py,
386 386 args.get_item(py, 0),
387 387 args.get_item(py, 1).extract(py)?,
388 388 args.get_item(py, 2).extract(py)?
389 389 )?;
390 390 Ok(rust_res)
391 391 }
392 392
393 393 // index_sequence_methods and index_mapping_methods.
394 394 //
395 395 // Since we call back through the high level Python API,
396 396 // there's no point making a distinction between index_get
397 397 // and index_getitem.
398 398 // gracinet 2023: this above is no longer true for the pure Rust impl
399 399
400 400 def __len__(&self) -> PyResult<usize> {
401 401 self.len(py)
402 402 }
403 403
404 404 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
405 405 let rust_res = self.inner_getitem(py, key.clone_ref(py))?;
406 406 Ok(rust_res)
407 407 }
408 408
409 409 def __contains__(&self, item: PyObject) -> PyResult<bool> {
410 410 // ObjectProtocol does not seem to provide contains(), so
411 411 // this is an equivalent implementation of the index_contains()
412 412 // defined in revlog.c
413 413 match item.extract::<i32>(py) {
414 414 Ok(rev) => {
415 415 Ok(rev >= -1 && rev < self.len(py)? as BaseRevision)
416 416 }
417 417 Err(_) => {
418 418 let item_bytes: PyBytes = item.extract(py)?;
419 419 let rust_res = self.has_node(py, item_bytes)?;
420 420 Ok(rust_res)
421 421 }
422 422 }
423 423 }
424 424
425 425 def nodemap_data_all(&self) -> PyResult<PyBytes> {
426 426 self.inner_nodemap_data_all(py)
427 427 }
428 428
429 429 def nodemap_data_incremental(&self) -> PyResult<PyObject> {
430 430 self.inner_nodemap_data_incremental(py)
431 431 }
432 432 def update_nodemap_data(
433 433 &self,
434 434 docket: PyObject,
435 435 nm_data: PyObject
436 436 ) -> PyResult<PyObject> {
437 437 self.inner_update_nodemap_data(py, docket, nm_data)
438 438 }
439 439
440 440 @property
441 441 def entry_size(&self) -> PyResult<PyInt> {
442 442 let rust_res: PyInt = INDEX_ENTRY_SIZE.to_py_object(py);
443 443 Ok(rust_res)
444 444 }
445 445
446 446 @property
447 447 def rust_ext_compat(&self) -> PyResult<PyInt> {
448 448 // will be entirely removed once the Rust index is complete; for now it
449 449 // is useful to implement in Rust to detangle things when removing `self.cindex`
450 450 let rust_res: PyInt = 1.to_py_object(py);
451 451 Ok(rust_res)
452 452 }
453 453
454 454 @property
455 455 def is_rust(&self) -> PyResult<PyBool> {
456 456 Ok(false.to_py_object(py))
457 457 }
458 458
459 459 });
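
Taken together, the methods above reproduce the nodemap index API from `mercurial/pure/parsers.py`. A hedged round-trip, with `rustidx` standing for a constructed `MixedIndex` as in the tests further down:

```python
# Assumed nodemap round-trip (sketch, not authoritative):
node = rustidx[0][7]                 # binary node id of revision 0
assert rustidx.get_rev(node) == 0    # None for unknown nodes
assert rustidx.rev(node) == 0        # RevlogError for unknown nodes
assert rustidx.has_node(node)
assert node in rustidx               # __contains__ takes revs or node ids
assert 1 <= rustidx.shortest(node) <= 2 * len(node)
```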
460 460
461 461 /// Take a (potentially) mmap'ed buffer, and return the underlying Python
462 462 /// buffer along with the Rust slice into said buffer. We need to keep the
463 463 /// Python buffer around, otherwise we'd get a dangling pointer once the buffer
464 464 /// is freed from Python's side.
465 465 ///
466 466 /// # Safety
467 467 ///
468 468 /// The caller must make sure that the buffer is kept around for at least as
469 469 /// long as the slice.
470 470 #[deny(unsafe_op_in_unsafe_fn)]
471 471 unsafe fn mmap_keeparound(
472 472 py: Python,
473 473 data: PyObject,
474 474 ) -> PyResult<(
475 475 PyBuffer,
476 476 Box<dyn std::ops::Deref<Target = [u8]> + Send + Sync + 'static>,
477 477 )> {
478 478 let buf = PyBuffer::get(py, &data)?;
479 479 let len = buf.item_count();
480 480
481 481 // Build a slice from the mmap'ed buffer data
482 482 let cbuf = buf.buf_ptr();
483 483 let bytes = if std::mem::size_of::<u8>() == buf.item_size()
484 484 && buf.is_c_contiguous()
485 485 && u8::is_compatible_format(buf.format())
486 486 {
487 487 unsafe { std::slice::from_raw_parts(cbuf as *const u8, len) }
488 488 } else {
489 489 return Err(PyErr::new::<ValueError, _>(
490 490 py,
491 491 "Nodemap data buffer has an invalid memory representation"
492 492 .to_string(),
493 493 ));
494 494 };
495 495
496 496 Ok((buf, Box::new(bytes)))
497 497 }
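
The safety contract above is why the constructors below stash the returned `PyBuffer` in `index_mmap` / `nodemap_mmap` for the lifetime of the class. CPython enforces the same keep-alive rule for pure-Python buffer consumers, which this illustrative snippet shows:

```python
# Keep-around contract seen from Python: an mmap with live exported
# views refuses to close -- the guarantee the Rust slice must recreate
# manually by holding on to the PyBuffer.
import mmap
import tempfile

with tempfile.NamedTemporaryFile() as f:
    f.write(b"some revlog data")
    f.flush()
    m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
    view = memoryview(m)   # analogous to the Rust slice into the buffer
    try:
        m.close()          # refused while `view` is alive
    except BufferError:
        pass
    view.release()         # drop the view first...
    m.close()              # ...then the buffer may be freed
```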
498 498
499 499 fn py_tuple_to_revision_data_params(
500 500 py: Python,
501 501 tuple: PyTuple,
502 502 ) -> PyResult<RevisionDataParams> {
503 503 if tuple.len(py) < 8 {
504 504 // this is better than the panic promised by tup.get_item()
505 505 return Err(PyErr::new::<IndexError, _>(
506 506 py,
507 507 "tuple index out of range",
508 508 ));
509 509 }
510 510 let offset_or_flags: u64 = tuple.get_item(py, 0).extract(py)?;
511 511 let node_id = tuple
512 512 .get_item(py, 7)
513 513 .extract::<PyBytes>(py)?
514 514 .data(py)
515 515 .try_into()
516 516 .unwrap();
517 517 let flags = (offset_or_flags & 0xFFFF) as u16;
518 518 let data_offset = offset_or_flags >> 16;
519 519 Ok(RevisionDataParams {
520 520 flags,
521 521 data_offset,
522 522 data_compressed_length: tuple.get_item(py, 1).extract(py)?,
523 523 data_uncompressed_length: tuple.get_item(py, 2).extract(py)?,
524 524 data_delta_base: tuple.get_item(py, 3).extract(py)?,
525 525 link_rev: tuple.get_item(py, 4).extract(py)?,
526 526 parent_rev_1: tuple.get_item(py, 5).extract(py)?,
527 527 parent_rev_2: tuple.get_item(py, 6).extract(py)?,
528 528 node_id,
529 529 ..Default::default()
530 530 })
531 531 }
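
Item 0 of an index tuple packs the revision flags into its low 16 bits and the data offset into the remaining high bits, as decoded above. A self-contained check of that layout (illustrative, not part of the changeset):

```python
def unpack_offset_flags(offset_or_flags):
    """Split index tuple item 0 into (data_offset, flags)."""
    flags = offset_or_flags & 0xFFFF      # low 16 bits
    data_offset = offset_or_flags >> 16   # remaining high bits
    return data_offset, flags

# packing offset 1234 with flags 0x0003 round-trips:
assert unpack_offset_flags((1234 << 16) | 0x0003) == (1234, 0x0003)
```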
532 532 fn revision_data_params_to_py_tuple(
533 533 py: Python,
534 534 params: RevisionDataParams,
535 535 ) -> PyTuple {
536 536 PyTuple::new(
537 537 py,
538 538 &[
539 539 params.data_offset.into_py_object(py).into_object(),
540 540 params
541 541 .data_compressed_length
542 542 .into_py_object(py)
543 543 .into_object(),
544 544 params
545 545 .data_uncompressed_length
546 546 .into_py_object(py)
547 547 .into_object(),
548 548 params.data_delta_base.into_py_object(py).into_object(),
549 549 params.link_rev.into_py_object(py).into_object(),
550 550 params.parent_rev_1.into_py_object(py).into_object(),
551 551 params.parent_rev_2.into_py_object(py).into_object(),
552 552 PyBytes::new(py, &params.node_id)
553 553 .into_py_object(py)
554 554 .into_object(),
555 555 params._sidedata_offset.into_py_object(py).into_object(),
556 556 params
557 557 ._sidedata_compressed_length
558 558 .into_py_object(py)
559 559 .into_object(),
560 560 params
561 561 .data_compression_mode
562 562 .into_py_object(py)
563 563 .into_object(),
564 564 params
565 565 ._sidedata_compression_mode
566 566 .into_py_object(py)
567 567 .into_object(),
568 568 params._rank.into_py_object(py).into_object(),
569 569 ],
570 570 )
571 571 }
572 572
573 573 struct PySnapshotsCache<'p> {
574 574 py: Python<'p>,
575 575 dict: PyDict,
576 576 }
577 577
578 578 impl<'p> SnapshotsCache for PySnapshotsCache<'p> {
579 579 fn insert_for(
580 580 &mut self,
581 581 rev: BaseRevision,
582 582 value: BaseRevision,
583 583 ) -> Result<(), RevlogError> {
584 584 let pyvalue = value.into_py_object(self.py).into_object();
585 585 match self.dict.get_item(self.py, rev) {
586 586 Some(obj) => obj
587 587 .extract::<PySet>(self.py)
588 588 .and_then(|set| set.add(self.py, pyvalue)),
589 589 None => PySet::new(self.py, vec![pyvalue])
590 590 .and_then(|set| self.dict.set_item(self.py, rev, set)),
591 591 }
592 592 .map_err(|_| {
593 593 RevlogError::Other(HgError::unsupported(
594 594 "Error in Python caches handling",
595 595 ))
596 596 })
597 597 }
598 598 }
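
`insert_for` keeps the cache in the same shape the pure-Python implementation uses: a dict mapping a base revision to the set of snapshot revisions built on top of it. The Python equivalent is roughly:

```python
# Rough pure-Python equivalent of PySnapshotsCache::insert_for:
cache = {}

def insert_for(rev, value):
    cache.setdefault(rev, set()).add(value)

insert_for(5, 7)
insert_for(5, 9)
assert cache == {5: {7, 9}}
```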
599 599
600 600 impl MixedIndex {
601 601 fn new(
602 602 py: Python,
603 603 cindex: PyObject,
604 604 data: PyObject,
605 605 header: u32,
606 606 ) -> PyResult<MixedIndex> {
607 607 // Safety: we keep the buffer around inside the class as `index_mmap`
608 608 let (buf, bytes) = unsafe { mmap_keeparound(py, data)? };
609 609
610 610 Self::create_instance(
611 611 py,
612 612 RefCell::new(cindex::Index::new(py, cindex)?),
613 613 hg::index::Index::new(
614 614 bytes,
615 615 IndexHeader::parse(&header.to_be_bytes())
616 616 .expect("default header is broken")
617 617 .unwrap(),
618 618 )
619 619 .map_err(|e| {
620 620 revlog_error_with_msg(py, e.to_string().as_bytes())
621 621 })?,
622 622 RefCell::new(None),
623 623 RefCell::new(None),
624 624 RefCell::new(None),
625 625 RefCell::new(Some(buf)),
626 626 )
627 627 }
628 628
629 629 fn len(&self, py: Python) -> PyResult<usize> {
630 630 let rust_index_len = self.index(py).borrow().len();
631 631 Ok(rust_index_len)
632 632 }
633 633
634 634 /// This is scaffolding at this point, but it could also become
635 635 /// a way to start a persistent nodemap or perform a
636 636 /// vacuum / repack operation
637 637 fn fill_nodemap(
638 638 &self,
639 639 py: Python,
640 640 nt: &mut CoreNodeTree,
641 641 ) -> PyResult<PyObject> {
642 642 let index = self.index(py).borrow();
643 643 for r in 0..self.len(py)? {
644 644 let rev = Revision(r as BaseRevision);
645 645 // in this case node() won't ever return None
646 646 nt.insert(&*index, index.node(rev).unwrap(), rev)
647 647 .map_err(|e| nodemap_error(py, e))?
648 648 }
649 649 Ok(py.None())
650 650 }
651 651
652 652 fn get_nodetree<'a>(
653 653 &'a self,
654 654 py: Python<'a>,
655 655 ) -> PyResult<&'a RefCell<Option<CoreNodeTree>>> {
656 656 if self.nt(py).borrow().is_none() {
657 657 let readonly = Box::<Vec<_>>::default();
658 658 let mut nt = CoreNodeTree::load_bytes(readonly, 0);
659 659 self.fill_nodemap(py, &mut nt)?;
660 660 self.nt(py).borrow_mut().replace(nt);
661 661 }
662 662 Ok(self.nt(py))
663 663 }
664 664
665 665 pub fn clone_cindex(&self, py: Python) -> cindex::Index {
666 666 self.cindex(py).borrow().clone_ref(py)
667 667 }
668 668
669 669 /// Returns the full nodemap bytes to be written as-is to disk
670 670 fn inner_nodemap_data_all(&self, py: Python) -> PyResult<PyBytes> {
671 671 let nodemap = self.get_nodetree(py)?.borrow_mut().take().unwrap();
672 672 let (readonly, bytes) = nodemap.into_readonly_and_added_bytes();
673 673
674 674 // If there's anything readonly, we need to build the data again from
675 675 // scratch
676 676 let bytes = if readonly.len() > 0 {
677 677 let mut nt = CoreNodeTree::load_bytes(Box::<Vec<_>>::default(), 0);
678 678 self.fill_nodemap(py, &mut nt)?;
679 679
680 680 let (readonly, bytes) = nt.into_readonly_and_added_bytes();
681 681 assert_eq!(readonly.len(), 0);
682 682
683 683 bytes
684 684 } else {
685 685 bytes
686 686 };
687 687
688 688 let bytes = PyBytes::new(py, &bytes);
689 689 Ok(bytes)
690 690 }
691 691
692 692 /// Returns the last saved docket along with the size of any changed data
693 693 /// (in bytes, i.e. number of masked blocks times the block size),
694 694 /// and said data as bytes.
694 694 fn inner_nodemap_data_incremental(
695 695 &self,
696 696 py: Python,
697 697 ) -> PyResult<PyObject> {
698 698 let docket = self.docket(py).borrow();
699 699 let docket = match docket.as_ref() {
700 700 Some(d) => d,
701 701 None => return Ok(py.None()),
702 702 };
703 703
704 704 let node_tree = self.get_nodetree(py)?.borrow_mut().take().unwrap();
705 705 let masked_blocks = node_tree.masked_readonly_blocks();
706 706 let (_, data) = node_tree.into_readonly_and_added_bytes();
707 707 let changed = masked_blocks * std::mem::size_of::<Block>();
708 708
709 709 Ok((docket, changed, PyBytes::new(py, &data))
710 710 .to_py_object(py)
711 711 .into_object())
712 712 }
713 713
714 714 /// Update the nodemap from the new (mmap'ed) data.
715 715 /// The docket is kept as a reference for later incremental calls.
716 716 fn inner_update_nodemap_data(
717 717 &self,
718 718 py: Python,
719 719 docket: PyObject,
720 720 nm_data: PyObject,
721 721 ) -> PyResult<PyObject> {
722 722 // Safety: we keep the buffer around inside the class as `nodemap_mmap`
723 723 let (buf, bytes) = unsafe { mmap_keeparound(py, nm_data)? };
724 724 let len = buf.item_count();
725 725 self.nodemap_mmap(py).borrow_mut().replace(buf);
726 726
727 727 let mut nt = CoreNodeTree::load_bytes(bytes, len);
728 728
729 729 let data_tip = docket
730 730 .getattr(py, "tip_rev")?
731 731 .extract::<BaseRevision>(py)?
732 732 .into();
733 733 self.docket(py).borrow_mut().replace(docket.clone_ref(py));
734 734 let idx = self.index(py).borrow();
735 735 let data_tip = idx.check_revision(data_tip).ok_or_else(|| {
736 736 nodemap_error(py, NodeMapError::RevisionNotInIndex(data_tip))
737 737 })?;
738 738 let current_tip = idx.len();
739 739
740 740 for r in (data_tip.0 + 1)..current_tip as BaseRevision {
741 741 let rev = Revision(r);
742 742 // in this case node() won't ever return None
743 743 nt.insert(&*idx, idx.node(rev).unwrap(), rev)
744 744 .map_err(|e| nodemap_error(py, e))?
745 745 }
746 746
747 747 *self.nt(py).borrow_mut() = Some(nt);
748 748
749 749 Ok(py.None())
750 750 }
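
`update_nodemap_data` and `nodemap_data_incremental` together form the persistent-nodemap round trip: load the on-disk data plus its docket, then later retrieve only what changed since. A hedged sketch of the exchange (`docket` and `nm_data` come from the store's nodemap files):

```python
# Hypothetical persistent-nodemap round trip using the methods above:
rustidx.update_nodemap_data(docket, nm_data)  # mmap'ed bytes kept alive
out = rustidx.nodemap_data_incremental()
if out is not None:
    docket, changed_bytes, data = out
    # caller appends `data` to the nodemap file and updates the docket
```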
751 751
752 752 fn inner_getitem(&self, py: Python, key: PyObject) -> PyResult<PyObject> {
753 753 let idx = self.index(py).borrow();
754 754 Ok(match key.extract::<BaseRevision>(py) {
755 755 Ok(key_as_int) => {
756 756 let entry_params = if key_as_int == NULL_REVISION.0 {
757 757 RevisionDataParams::default()
758 758 } else {
759 759 let rev = UncheckedRevision(key_as_int);
760 760 match idx.entry_as_params(rev) {
761 761 Some(e) => e,
762 762 None => {
763 763 return Err(PyErr::new::<IndexError, _>(
764 764 py,
765 765 "revlog index out of range",
766 766 ));
767 767 }
768 768 }
769 769 };
770 770 revision_data_params_to_py_tuple(py, entry_params)
771 771 .into_object()
772 772 }
773 773 _ => self.get_rev(py, key.extract::<PyBytes>(py)?)?.map_or_else(
774 774 || py.None(),
775 775 |py_rev| py_rev.into_py_object(py).into_object(),
776 776 ),
777 777 })
778 778 }
779 779
780 780 fn inner_headrevs(&self, py: Python) -> PyResult<PyObject> {
781 781 let index = &*self.index(py).borrow();
782 782 let as_vec: Vec<PyObject> = index
783 783 .head_revs()
784 784 .map_err(|e| graph_error(py, e))?
785 785 .iter()
786 786 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
787 787 .collect();
788 788 Ok(PyList::new(py, &as_vec).into_object())
789 789 }
790 790
791 791 fn inner_headrevsfiltered(
792 792 &self,
793 793 py: Python,
794 794 filtered_revs: &PyObject,
795 795 ) -> PyResult<PyObject> {
796 796 let index = &mut *self.index(py).borrow_mut();
797 797 let filtered_revs = rev_pyiter_collect(py, filtered_revs, index)?;
798 798
799 799 let as_vec: Vec<PyObject> = index
800 800 .head_revs_filtered(&filtered_revs)
801 801 .map_err(|e| graph_error(py, e))?
802 802 .iter()
803 803 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
804 804 .collect();
805 805 Ok(PyList::new(py, &as_vec).into_object())
806 806 }
807 807
808 808 fn inner_ancestors(
809 809 &self,
810 810 py: Python,
811 811 py_revs: &PyTuple,
812 812 ) -> PyResult<PyObject> {
813 813 let index = &*self.index(py).borrow();
814 814 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
815 815 let as_vec: Vec<_> = index
816 816 .ancestors(&revs)
817 817 .map_err(|e| graph_error(py, e))?
818 818 .iter()
819 819 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
820 820 .collect();
821 821 Ok(PyList::new(py, &as_vec).into_object())
822 822 }
823 823
824 824 fn inner_commonancestorsheads(
825 825 &self,
826 826 py: Python,
827 827 py_revs: &PyTuple,
828 828 ) -> PyResult<PyObject> {
829 829 let index = &*self.index(py).borrow();
830 830 let revs: Vec<_> = rev_pyiter_collect(py, py_revs.as_object(), index)?;
831 831 let as_vec: Vec<_> = index
832 832 .common_ancestor_heads(&revs)
833 833 .map_err(|e| graph_error(py, e))?
834 834 .iter()
835 835 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
836 836 .collect();
837 837 Ok(PyList::new(py, &as_vec).into_object())
838 838 }
839 839
840 840 fn inner_computephasesmapsets(
841 841 &self,
842 842 py: Python,
843 843 py_roots: PyDict,
844 844 ) -> PyResult<PyObject> {
845 845 let index = &*self.index(py).borrow();
846 846 let opt = self.get_nodetree(py)?.borrow();
847 847 let nt = opt.as_ref().unwrap();
848 848 let roots: Result<HashMap<Phase, Vec<Revision>>, PyErr> = py_roots
849 849 .items_list(py)
850 850 .iter(py)
851 851 .map(|r| {
852 852 let phase = r.get_item(py, 0)?;
853 853 let nodes = r.get_item(py, 1)?;
854 854 // Transform the nodes from Python to revs here since we
855 855 // have access to the nodemap
856 856 let revs: Result<_, _> = nodes
857 857 .iter(py)?
858 858 .map(|node| match node?.extract::<PyBytes>(py) {
859 859 Ok(py_bytes) => {
860 860 let node = node_from_py_bytes(py, &py_bytes)?;
861 861 nt.find_bin(index, node.into())
862 862 .map_err(|e| nodemap_error(py, e))?
863 863 .ok_or_else(|| revlog_error(py))
864 864 }
865 865 Err(e) => Err(e),
866 866 })
867 867 .collect();
868 868 let phase = Phase::try_from(phase.extract::<usize>(py)?)
869 869 .map_err(|_| revlog_error(py));
870 870 Ok((phase?, revs?))
871 871 })
872 872 .collect();
873 873 let (len, phase_maps) = index
874 874 .compute_phases_map_sets(roots?)
875 875 .map_err(|e| graph_error(py, e))?;
876 876
877 877 // Ugly hack, but temporary
878 878 const IDX_TO_PHASE_NUM: [usize; 4] = [1, 2, 32, 96];
879 879 let py_phase_maps = PyDict::new(py);
880 880 for (idx, roots) in phase_maps.iter().enumerate() {
881 881 let phase_num = IDX_TO_PHASE_NUM[idx].into_py_object(py);
882 882 // OPTIM too bad we have to collect here. At least, we could
883 883 // reuse the same Vec and allocate it with capacity at
884 884 // max(len(phase_maps))
885 885 let roots_vec: Vec<PyInt> = roots
886 886 .iter()
887 887 .map(|r| PyRevision::from(*r).into_py_object(py))
888 888 .collect();
889 889 py_phase_maps.set_item(
890 890 py,
891 891 phase_num,
892 892 PySet::new(py, roots_vec)?,
893 893 )?;
894 894 }
895 895 Ok(PyTuple::new(
896 896 py,
897 897 &[
898 898 len.into_py_object(py).into_object(),
899 899 py_phase_maps.into_object(),
900 900 ],
901 901 )
902 902 .into_object())
903 903 }
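
The `IDX_TO_PHASE_NUM` values above are the non-public phase numbers from `mercurial/phases.py` (public roots are implicit and never tracked), so the returned mapping can be read as:

```python
# Phase numbers behind IDX_TO_PHASE_NUM = [1, 2, 32, 96]:
PHASE_NAMES = {1: "draft", 2: "secret", 32: "archived", 96: "internal"}
# computephasesmapsets returns (len, {phase_num: set_of_root_revs}),
# e.g. (42, {1: {3, 7}, 2: set(), 32: set(), 96: set()})  # illustrative
```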
904 904
905 905 fn inner_slicechunktodensity(
906 906 &self,
907 907 py: Python,
908 908 revs: PyObject,
909 909 target_density: f64,
910 910 min_gap_size: usize,
911 911 ) -> PyResult<PyObject> {
912 912 let index = &*self.index(py).borrow();
913 913 let revs: Vec<_> = rev_pyiter_collect(py, &revs, index)?;
914 914 let as_nested_vec =
915 915 index.slice_chunk_to_density(&revs, target_density, min_gap_size);
916 916 let mut res = Vec::with_capacity(as_nested_vec.len());
917 917 let mut py_chunk = Vec::new();
918 918 for chunk in as_nested_vec {
919 919 py_chunk.clear();
920 920 py_chunk.reserve_exact(chunk.len());
921 921 for rev in chunk {
922 922 py_chunk.push(
923 923 PyRevision::from(rev).into_py_object(py).into_object(),
924 924 );
925 925 }
926 926 res.push(PyList::new(py, &py_chunk).into_object());
927 927 }
928 928 // This is just to do the same as C, not sure why it does this
929 929 if res.len() == 1 {
930 930 Ok(PyTuple::new(py, &res).into_object())
931 931 } else {
932 932 Ok(PyList::new(py, &res).into_object())
933 933 }
934 934 }
935 935
936 936 fn inner_reachableroots2(
937 937 &self,
938 938 py: Python,
939 939 min_root: UncheckedRevision,
940 940 heads: PyObject,
941 941 roots: PyObject,
942 942 include_path: bool,
943 943 ) -> PyResult<PyObject> {
944 944 let index = &*self.index(py).borrow();
945 945 let heads = rev_pyiter_collect_or_else(py, &heads, index, |_rev| {
946 946 PyErr::new::<IndexError, _>(py, "head out of range")
947 947 })?;
948 948 let roots: Result<_, _> = roots
949 949 .iter(py)?
950 950 .map(|r| {
951 951 r.and_then(|o| match o.extract::<PyRevision>(py) {
952 952 Ok(r) => Ok(UncheckedRevision(r.0)),
953 953 Err(e) => Err(e),
954 954 })
955 955 })
956 956 .collect();
957 957 let as_set = index
958 958 .reachable_roots(min_root, heads, roots?, include_path)
959 959 .map_err(|e| graph_error(py, e))?;
960 960 let as_vec: Vec<PyObject> = as_set
961 961 .iter()
962 962 .map(|r| PyRevision::from(*r).into_py_object(py).into_object())
963 963 .collect();
964 964 Ok(PyList::new(py, &as_vec).into_object())
965 965 }
966 966 }
967 967
968 py_class!(pub class NodeTree |py| {
969 data nt: RefCell<CoreNodeTree>;
970 data index: RefCell<UnsafePyLeaked<PySharedIndex>>;
971
972 def __new__(_cls, index: PyObject) -> PyResult<NodeTree> {
973 let index = py_rust_index_to_graph(py, index)?;
974 let nt = CoreNodeTree::default(); // in-RAM, fully mutable
975 Self::create_instance(py, RefCell::new(nt), RefCell::new(index))
976 }
977
978 def insert(&self, rev: PyRevision) -> PyResult<PyObject> {
979 let leaked = self.index(py).borrow();
980 let index = &*unsafe { leaked.try_borrow(py)? };
981
982 let rev = UncheckedRevision(rev.0);
983 let rev = index
984 .check_revision(rev)
985 .ok_or_else(|| rev_not_in_index(py, rev))?;
986 if rev == NULL_REVISION {
987 return Err(rev_not_in_index(py, rev.into()))
988 }
989
990 let entry = index.inner.get_entry(rev).unwrap();
991 let mut nt = self.nt(py).borrow_mut();
992 nt.insert(index, entry.hash(), rev).map_err(|e| nodemap_error(py, e))?;
993
994 Ok(py.None())
995 }
996
997 /// Lookup by node hex prefix in the NodeTree, returning revision number.
998 ///
999 /// This is not part of the classical NodeTree API, but is good enough
1000 /// for unit testing, as in `test-rust-revlog.py`.
1001 def prefix_rev_lookup(
1002 &self,
1003 node_prefix: PyBytes
1004 ) -> PyResult<Option<PyRevision>> {
1005 let prefix = NodePrefix::from_hex(node_prefix.data(py))
1006 .map_err(|_| PyErr::new::<ValueError, _>(
1007 py,
1008 format!("Invalid node or prefix {:?}",
1009 node_prefix.as_object()))
1010 )?;
1011
1012 let nt = self.nt(py).borrow();
1013 let leaked = self.index(py).borrow();
1014 let index = &*unsafe { leaked.try_borrow(py)? };
1015
1016 Ok(nt.find_bin(index, prefix)
1017 .map_err(|e| nodemap_error(py, e))?
1018 .map(|r| r.into())
1019 )
1020 }
1021
1022 def shortest(&self, node: PyBytes) -> PyResult<usize> {
1023 let nt = self.nt(py).borrow();
1024 let leaked = self.index(py).borrow();
1025 let idx = &*unsafe { leaked.try_borrow(py)? };
1026 match nt.unique_prefix_len_node(idx, &node_from_py_bytes(py, &node)?)
1027 {
1028 Ok(Some(l)) => Ok(l),
1029 Ok(None) => Err(revlog_error(py)),
1030 Err(e) => Err(nodemap_error(py, e)),
1031 }
1032 }
1033 });
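
The lookup semantics of the new class, as exercised by `test-rust-revlog.py` below (sketch; assumes the prefix is unambiguous in the index):

```python
# Assumed NodeTree lookup semantics:
# - a unique hex prefix resolves to its revision number
# - an unknown prefix resolves to None
# - an ambiguous prefix raises error.RevlogError (MultipleResults)
from mercurial.node import hex

nt = revlog.NodeTree(rust_index)
nt.insert(0)
full_hex = hex(rust_index[0][7])      # 40-char hex node id, as bytes
assert nt.prefix_rev_lookup(full_hex) == 0
assert nt.prefix_rev_lookup(full_hex[:4]) == 0   # unambiguous prefix
```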
1034
968 1035 fn revlog_error(py: Python) -> PyErr {
969 1036 match py
970 1037 .import("mercurial.error")
971 1038 .and_then(|m| m.get(py, "RevlogError"))
972 1039 {
973 1040 Err(e) => e,
974 1041 Ok(cls) => PyErr::from_instance(
975 1042 py,
976 1043 cls.call(py, (py.None(),), None).ok().into_py_object(py),
977 1044 ),
978 1045 }
979 1046 }
980 1047
981 1048 fn revlog_error_with_msg(py: Python, msg: &[u8]) -> PyErr {
982 1049 match py
983 1050 .import("mercurial.error")
984 1051 .and_then(|m| m.get(py, "RevlogError"))
985 1052 {
986 1053 Err(e) => e,
987 1054 Ok(cls) => PyErr::from_instance(
988 1055 py,
989 1056 cls.call(py, (PyBytes::new(py, msg),), None)
990 1057 .ok()
991 1058 .into_py_object(py),
992 1059 ),
993 1060 }
994 1061 }
995 1062
996 1063 fn graph_error(py: Python, _err: hg::GraphError) -> PyErr {
997 1064 // ParentOutOfRange is currently the only alternative
998 1065 // in `hg::GraphError`. The C index always raises this simple ValueError.
999 1066 PyErr::new::<ValueError, _>(py, "parent out of range")
1000 1067 }
1001 1068
1002 1069 fn nodemap_rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1003 1070 PyErr::new::<ValueError, _>(
1004 1071 py,
1005 1072 format!(
1006 1073 "Inconsistency: Revision {} found in nodemap \
1007 1074 is not in revlog index",
1008 1075 rev
1009 1076 ),
1010 1077 )
1011 1078 }
1012 1079
1013 1080 fn rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
1014 1081 PyErr::new::<ValueError, _>(
1015 1082 py,
1016 1083 format!("revlog index out of range: {}", rev),
1017 1084 )
1018 1085 }
1019 1086
1020 1087 /// Standard treatment of NodeMapError
1021 1088 fn nodemap_error(py: Python, err: NodeMapError) -> PyErr {
1022 1089 match err {
1023 1090 NodeMapError::MultipleResults => revlog_error(py),
1024 1091 NodeMapError::RevisionNotInIndex(r) => nodemap_rev_not_in_index(py, r),
1025 1092 }
1026 1093 }
1027 1094
1028 1095 /// Create the module, with __package__ given from parent
1029 1096 pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
1030 1097 let dotted_name = &format!("{}.revlog", package);
1031 1098 let m = PyModule::new(py, dotted_name)?;
1032 1099 m.add(py, "__package__", package)?;
1033 1100 m.add(py, "__doc__", "RevLog - Rust implementations")?;
1034 1101
1035 1102 m.add_class::<MixedIndex>(py)?;
1103 m.add_class::<NodeTree>(py)?;
1036 1104
1037 1105 let sys = PyModule::import(py, "sys")?;
1038 1106 let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
1039 1107 sys_modules.set_item(py, dotted_name, &m)?;
1040 1108
1041 1109 Ok(m)
1042 1110 }
tests/test-rust-revlog.py
@@ -1,63 +1,94 @@
1 1 import struct
2 2 import unittest
3 3
4 from mercurial.node import hex
5
4 6 try:
5 7 from mercurial import rustext
6 8
7 9 rustext.__name__ # trigger immediate actual import
8 10 except ImportError:
9 11 rustext = None
10 12 else:
11 13 from mercurial.rustext import revlog
12 14
13 15 # this would fail already without appropriate ancestor.__package__
14 16 from mercurial.rustext.ancestor import LazyAncestors
15 17
16 18 from mercurial.testing import revlog as revlogtesting
17 19
18 20 header = struct.unpack(">I", revlogtesting.data_non_inlined[:4])[0]
19 21
20 22
21 23 @unittest.skipIf(
22 24 rustext is None,
23 25 "rustext module revlog relies on is not available",
24 26 )
25 27 class RustRevlogIndexTest(revlogtesting.RevlogBasedTestBase):
26 28 def test_heads(self):
27 29 idx = self.parseindex()
28 30 rustidx = revlog.MixedIndex(idx, revlogtesting.data_non_inlined, header)
29 31 self.assertEqual(rustidx.headrevs(), idx.headrevs())
30 32
31 33 def test_get_cindex(self):
32 34 # drop me once we no longer need the method for shortest node
33 35 idx = self.parseindex()
34 36 rustidx = revlog.MixedIndex(idx, revlogtesting.data_non_inlined, header)
35 37 cidx = rustidx.get_cindex()
36 38 self.assertTrue(idx is cidx)
37 39
38 40 def test_len(self):
39 41 idx = self.parseindex()
40 42 rustidx = revlog.MixedIndex(idx, revlogtesting.data_non_inlined, header)
41 43 self.assertEqual(len(rustidx), len(idx))
42 44
43 45 def test_ancestors(self):
44 46 idx = self.parseindex()
45 47 rustidx = revlog.MixedIndex(idx, revlogtesting.data_non_inlined, header)
46 48 lazy = LazyAncestors(rustidx, [3], 0, True)
47 49 # we have two more references to the index:
48 50 # - in its inner iterator for __contains__ and __bool__
49 51 # - in the LazyAncestors instance itself (to spawn new iterators)
50 52 self.assertTrue(2 in lazy)
51 53 self.assertTrue(bool(lazy))
52 54 self.assertEqual(list(lazy), [3, 2, 1, 0])
53 55 # a second time to validate that we spawn new iterators
54 56 self.assertEqual(list(lazy), [3, 2, 1, 0])
55 57
56 58 # let's check bool for an empty one
57 59 self.assertFalse(LazyAncestors(rustidx, [0], 0, False))
58 60
59 61
62 @unittest.skipIf(
63 rustext is None,
64 "rustext module revlog relies on is not available",
65 )
66 class RustRevlogNodeTreeClassTest(revlogtesting.RustRevlogBasedTestBase):
67 def test_standalone_nodetree(self):
68 idx = self.parserustindex()
69 nt = revlog.NodeTree(idx)
70 for i in range(4):
71 nt.insert(i)
72
73 bin_nodes = [entry[7] for entry in idx]
74 hex_nodes = [hex(n) for n in bin_nodes]
75
76 for i, node in enumerate(hex_nodes):
77 self.assertEqual(nt.prefix_rev_lookup(node), i)
78 self.assertEqual(nt.prefix_rev_lookup(node[:5]), i)
79
80 # all 4 revisions in idx (standard data set) have different
81 # first nybbles in their Node IDs,
82 # hence `nt.shortest()` should return 1 for them, except when
83 # the leading nybble is 0 (ambiguity with NULL_NODE)
84 for i, (bin_node, hex_node) in enumerate(zip(bin_nodes, hex_nodes)):
85 shortest = nt.shortest(bin_node)
86 expected = 2 if hex_node[0] == ord('0') else 1
87 self.assertEqual(shortest, expected)
88 self.assertEqual(nt.prefix_rev_lookup(hex_node[:shortest]), i)
89
90
60 91 if __name__ == '__main__':
61 92 import silenttestrunner
62 93
63 94 silenttestrunner.main(__name__)