rust: look up HgRevlogIndex_GetParents() from symbol table...
Yuya Nishihara
r40897:54a60968 default
@@ -1,2901 +1,2889 @@
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #include <Python.h>
11 11 #include <assert.h>
12 12 #include <ctype.h>
13 13 #include <limits.h>
14 14 #include <stddef.h>
15 15 #include <stdlib.h>
16 16 #include <string.h>
17 17
18 18 #include "bitmanipulation.h"
19 19 #include "charencode.h"
20 20 #include "revlog.h"
21 21 #include "util.h"
22 22
23 23 #ifdef IS_PY3K
24 24 /* The mapping of Python types is meant to be temporary to get Python
25 25 * 3 to compile. We should remove this once Python 3 is fully
26 26 * supported and proper types are used in the extensions themselves. */
27 27 #define PyInt_Check PyLong_Check
28 28 #define PyInt_FromLong PyLong_FromLong
29 29 #define PyInt_FromSsize_t PyLong_FromSsize_t
30 30 #define PyInt_AsLong PyLong_AsLong
31 31 #endif
32 32
33 33 typedef struct indexObjectStruct indexObject;
34 34
35 35 typedef struct {
36 36 int children[16];
37 37 } nodetreenode;
38 38
39 39 /*
40 40 * A base-16 trie for fast node->rev mapping.
41 41 *
42 42 * Positive value is index of the next node in the trie
43 43 * Negative value is a leaf: -(rev + 2)
44 44 * Zero is empty
45 45 */
46 46 typedef struct {
47 47 indexObject *index;
48 48 nodetreenode *nodes;
49 49 unsigned length; /* # nodes in use */
50 50 unsigned capacity; /* # nodes allocated */
51 51 int depth; /* maximum depth of tree */
52 52 int splits; /* # splits performed */
53 53 } nodetree;
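
A minimal standalone sketch of the child-slot encoding described above (illustrative only, not part of parsers.c): 0 means empty, a positive value is the index of another trie node, and a leaf for rev is stored as -(rev + 2). Two edge cases fall out of this encoding: nullrev (-1) still encodes to a negative value, and encoding rev -2 yields 0, i.e. "not set", which nt_delete_node() further below relies on.

#include <assert.h>

static int nt_leaf_encode(int rev) /* hypothetical helper */
{
        return -(rev + 2);
}

static int nt_leaf_decode(int v) /* inverse, as used by nt_find() */
{
        return -(v + 2);
}

int main(void)
{
        assert(nt_leaf_encode(0) == -2);  /* rev 0 is distinct from "empty" */
        assert(nt_leaf_decode(-2) == 0);
        assert(nt_leaf_encode(-1) == -1); /* nullrev stays a valid leaf */
        assert(nt_leaf_encode(-2) == 0);  /* "deleting" empties the slot */
        return 0;
}
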
54 54
55 55 typedef struct {
56 56 PyObject_HEAD /* ; */
57 57 nodetree nt;
58 58 } nodetreeObject;
59 59
60 60 /*
61 61 * This class has two behaviors.
62 62 *
63 63 * When used in a list-like way (with integer keys), we decode an
64 64 * entry in a RevlogNG index file on demand. Our last entry is a
65 65 * sentinel, always a nullid. We have limited support for
66 66 * integer-keyed insert and delete, only at elements right before the
67 67 * sentinel.
68 68 *
69 69 * With string keys, we lazily perform a reverse mapping from node to
70 70 * rev, using a base-16 trie.
71 71 */
72 72 struct indexObjectStruct {
73 73 PyObject_HEAD
74 74 /* Type-specific fields go here. */
75 75 PyObject *data; /* raw bytes of index */
76 76 Py_buffer buf; /* buffer of data */
77 77 PyObject **cache; /* cached tuples */
78 78 const char **offsets; /* populated on demand */
79 79 Py_ssize_t raw_length; /* original number of elements */
80 80 Py_ssize_t length; /* current number of elements */
81 81 PyObject *added; /* populated on demand */
82 82 PyObject *headrevs; /* cache, invalidated on changes */
83 83 PyObject *filteredrevs; /* filtered revs set */
84 84 nodetree nt; /* base-16 trie */
85 85 int ntinitialized; /* 0 or 1 */
86 86 int ntrev; /* last rev scanned */
87 87 int ntlookups; /* # lookups */
88 88 int ntmisses; /* # lookups that miss the cache */
89 89 int inlined;
90 90 };
91 91
92 92 static Py_ssize_t index_length(const indexObject *self)
93 93 {
94 94 if (self->added == NULL)
95 95 return self->length;
96 96 return self->length + PyList_GET_SIZE(self->added);
97 97 }
98 98
99 99 static PyObject *nullentry = NULL;
100 100 static const char nullid[20] = {0};
101 101
102 102 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
103 103
104 104 #if LONG_MAX == 0x7fffffffL
105 105 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
106 106 #else
107 107 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
108 108 #endif
109 109
110 110 /* A RevlogNG v1 index entry is 64 bytes long. */
111 111 static const long v1_hdrsize = 64;
112 112
113 113 static void raise_revlog_error(void)
114 114 {
115 115 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
116 116
117 117 mod = PyImport_ImportModule("mercurial.error");
118 118 if (mod == NULL) {
119 119 goto cleanup;
120 120 }
121 121
122 122 dict = PyModule_GetDict(mod);
123 123 if (dict == NULL) {
124 124 goto cleanup;
125 125 }
126 126 Py_INCREF(dict);
127 127
128 128 errclass = PyDict_GetItemString(dict, "RevlogError");
129 129 if (errclass == NULL) {
130 130 PyErr_SetString(PyExc_SystemError,
131 131 "could not find RevlogError");
132 132 goto cleanup;
133 133 }
134 134
135 135 /* value of exception is ignored by callers */
136 136 PyErr_SetString(errclass, "RevlogError");
137 137
138 138 cleanup:
139 139 Py_XDECREF(dict);
140 140 Py_XDECREF(mod);
141 141 }
142 142
143 143 /*
144 144 * Return a pointer to the beginning of a RevlogNG record.
145 145 */
146 146 static const char *index_deref(indexObject *self, Py_ssize_t pos)
147 147 {
148 148 if (self->inlined && pos > 0) {
149 149 if (self->offsets == NULL) {
150 150 self->offsets = PyMem_Malloc(self->raw_length *
151 151 sizeof(*self->offsets));
152 152 if (self->offsets == NULL)
153 153 return (const char *)PyErr_NoMemory();
154 154 inline_scan(self, self->offsets);
155 155 }
156 156 return self->offsets[pos];
157 157 }
158 158
159 159 return (const char *)(self->buf.buf) + pos * v1_hdrsize;
160 160 }
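
A standalone sketch (made-up numbers) of why the inlined case above needs an offsets table: in an inline revlog each 64-byte entry is immediately followed by its compressed chunk, so entry positions are data-dependent and must be discovered by inline_scan() rather than computed as pos * v1_hdrsize.

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* entry i+1 starts at offset(i) + 64 + comp_len(i) */
        int64_t comp_len0 = 100, comp_len1 = 30; /* made-up chunk sizes */
        int64_t off0 = 0;
        int64_t off1 = off0 + 64 + comp_len0;
        int64_t off2 = off1 + 64 + comp_len1;
        assert(off1 == 164); /* not 1 * 64 */
        assert(off2 == 258); /* not 2 * 64 */
        return 0;
}
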
161 161
162 162 /*
163 163 * Get parents of the given rev.
164 164 *
165 165 * The specified rev must be valid and must not be nullrev. A returned
166 166 * parent revision may be nullrev, but is guaranteed to be in valid range.
167 167 */
168 168 static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
169 169 int maxrev)
170 170 {
171 171 if (rev >= self->length) {
172 172 long tmp;
173 173 PyObject *tuple =
174 174 PyList_GET_ITEM(self->added, rev - self->length);
175 175 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 5), &tmp)) {
176 176 return -1;
177 177 }
178 178 ps[0] = (int)tmp;
179 179 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 6), &tmp)) {
180 180 return -1;
181 181 }
182 182 ps[1] = (int)tmp;
183 183 } else {
184 184 const char *data = index_deref(self, rev);
185 185 ps[0] = getbe32(data + 24);
186 186 ps[1] = getbe32(data + 28);
187 187 }
188 188 /* If the index file is corrupted, ps[] may point to invalid revisions,
189 189 * so trusting them unconditionally risks a buffer overflow. */
190 190 if (ps[0] < -1 || ps[0] > maxrev || ps[1] < -1 || ps[1] > maxrev) {
191 191 PyErr_SetString(PyExc_ValueError, "parent out of range");
192 192 return -1;
193 193 }
194 194 return 0;
195 195 }
196 196
197 197 /*
198 198 * Get parents of the given rev.
199 199 *
200 200 * If the specified rev is out of range, IndexError will be raised. If the
201 201 * revlog entry is corrupted, ValueError may be raised.
202 202 *
203 203 * Returns 0 on success or -1 on failure.
204 204 */
205 205 int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps)
206 206 {
207 207 int tiprev;
208 208 if (!op || !HgRevlogIndex_Check(op) || !ps) {
209 209 PyErr_BadInternalCall();
210 210 return -1;
211 211 }
212 212 tiprev = (int)index_length((indexObject *)op) - 1;
213 213 if (rev < -1 || rev > tiprev) {
214 214 PyErr_Format(PyExc_IndexError, "rev out of range: %d", rev);
215 215 return -1;
216 216 } else if (rev == -1) {
217 217 ps[0] = ps[1] = -1;
218 218 return 0;
219 219 } else {
220 220 return index_get_parents((indexObject *)op, rev, ps, tiprev);
221 221 }
222 222 }
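
A hedged usage sketch of the exported entry point (this is the symbol the commit message says the Rust bindings resolve from the symbol table). It is a fragment relying on the declarations above; `idx` is a hypothetical pointer to a parsers index object, and error handling is abbreviated.

/* Sketch only: how a C caller might drive HgRevlogIndex_GetParents(). */
static int print_parents(PyObject *idx, int rev)
{
        int ps[2];
        if (HgRevlogIndex_GetParents(idx, rev, ps) == -1)
                return -1; /* IndexError/ValueError already set */
        printf("parents of %d: %d, %d\n", rev, ps[0], ps[1]);
        return 0;
}
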
223 223
224 224 static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
225 225 {
226 226 uint64_t offset;
227 227 if (rev >= self->length) {
228 228 PyObject *tuple;
229 229 PyObject *pylong;
230 230 PY_LONG_LONG tmp;
231 231 tuple = PyList_GET_ITEM(self->added, rev - self->length);
232 232 pylong = PyTuple_GET_ITEM(tuple, 0);
233 233 tmp = PyLong_AsLongLong(pylong);
234 234 if (tmp == -1 && PyErr_Occurred()) {
235 235 return -1;
236 236 }
237 237 if (tmp < 0) {
238 238 PyErr_Format(PyExc_OverflowError,
239 239 "revlog entry size out of bound (%lld)",
240 240 (long long)tmp);
241 241 return -1;
242 242 }
243 243 offset = (uint64_t)tmp;
244 244 } else {
245 245 const char *data = index_deref(self, rev);
246 246 offset = getbe32(data + 4);
247 247 if (rev == 0) {
248 248 /* mask out version number for the first entry */
249 249 offset &= 0xFFFF;
250 250 } else {
251 251 uint32_t offset_high = getbe32(data);
252 252 offset |= ((uint64_t)offset_high) << 32;
253 253 }
254 254 }
255 255 return (int64_t)(offset >> 16);
256 256 }
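
A worked, self-contained example of the packing that index_get_start() decodes (illustrative values): the first 8 bytes of an entry hold a 6-byte byte-offset followed by 2 bytes of flags, so the assembled 64-bit value is (offset << 16) | flags, and the offset comes back out with a 16-bit right shift. For rev 0 the high 4 bytes hold the revlog version instead, hence the 0xFFFF mask above.

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t offset = 0x123456789aULL; /* made-up data-file position */
        uint64_t flags = 0x0001;           /* made-up revision flags */
        uint64_t offset_flags = (offset << 16) | flags;
        assert((offset_flags >> 16) == offset); /* index_get_start's result */
        assert((offset_flags & 0xFFFF) == flags);
        return 0;
}
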
257 257
258 258 static inline int index_get_length(indexObject *self, Py_ssize_t rev)
259 259 {
260 260 if (rev >= self->length) {
261 261 PyObject *tuple;
262 262 PyObject *pylong;
263 263 long ret;
264 264 tuple = PyList_GET_ITEM(self->added, rev - self->length);
265 265 pylong = PyTuple_GET_ITEM(tuple, 1);
266 266 ret = PyInt_AsLong(pylong);
267 267 if (ret == -1 && PyErr_Occurred()) {
268 268 return -1;
269 269 }
270 270 if (ret < 0 || ret > (long)INT_MAX) {
271 271 PyErr_Format(PyExc_OverflowError,
272 272 "revlog entry size out of bound (%ld)",
273 273 ret);
274 274 return -1;
275 275 }
276 276 return (int)ret;
277 277 } else {
278 278 const char *data = index_deref(self, rev);
279 279 int tmp = (int)getbe32(data + 8);
280 280 if (tmp < 0) {
281 281 PyErr_Format(PyExc_OverflowError,
282 282 "revlog entry size out of bound (%d)",
283 283 tmp);
284 284 return -1;
285 285 }
286 286 return tmp;
287 287 }
288 288 }
289 289
290 290 /*
291 291 * RevlogNG format (all in big endian, data may be inlined):
292 292 * 6 bytes: offset
293 293 * 2 bytes: flags
294 294 * 4 bytes: compressed length
295 295 * 4 bytes: uncompressed length
296 296 * 4 bytes: base revision
297 297 * 4 bytes: link revision
298 298 * 4 bytes: parent 1 revision
299 299 * 4 bytes: parent 2 revision
300 300 * 32 bytes: nodeid (only 20 bytes used)
301 301 */
302 302 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
303 303 {
304 304 uint64_t offset_flags;
305 305 int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
306 306 const char *c_node_id;
307 307 const char *data;
308 308 Py_ssize_t length = index_length(self);
309 309 PyObject *entry;
310 310
311 311 if (pos == -1) {
312 312 Py_INCREF(nullentry);
313 313 return nullentry;
314 314 }
315 315
316 316 if (pos < 0 || pos >= length) {
317 317 PyErr_SetString(PyExc_IndexError, "revlog index out of range");
318 318 return NULL;
319 319 }
320 320
321 321 if (pos >= self->length) {
322 322 PyObject *obj;
323 323 obj = PyList_GET_ITEM(self->added, pos - self->length);
324 324 Py_INCREF(obj);
325 325 return obj;
326 326 }
327 327
328 328 if (self->cache) {
329 329 if (self->cache[pos]) {
330 330 Py_INCREF(self->cache[pos]);
331 331 return self->cache[pos];
332 332 }
333 333 } else {
334 334 self->cache = calloc(self->raw_length, sizeof(PyObject *));
335 335 if (self->cache == NULL)
336 336 return PyErr_NoMemory();
337 337 }
338 338
339 339 data = index_deref(self, pos);
340 340 if (data == NULL)
341 341 return NULL;
342 342
343 343 offset_flags = getbe32(data + 4);
344 344 if (pos == 0) /* mask out version number for the first entry */
345 345 offset_flags &= 0xFFFF;
346 346 else {
347 347 uint32_t offset_high = getbe32(data);
348 348 offset_flags |= ((uint64_t)offset_high) << 32;
349 349 }
350 350
351 351 comp_len = getbe32(data + 8);
352 352 uncomp_len = getbe32(data + 12);
353 353 base_rev = getbe32(data + 16);
354 354 link_rev = getbe32(data + 20);
355 355 parent_1 = getbe32(data + 24);
356 356 parent_2 = getbe32(data + 28);
357 357 c_node_id = data + 32;
358 358
359 359 entry = Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
360 360 base_rev, link_rev, parent_1, parent_2, c_node_id,
361 361 20);
362 362
363 363 if (entry) {
364 364 PyObject_GC_UnTrack(entry);
365 365 Py_INCREF(entry);
366 366 }
367 367
368 368 self->cache[pos] = entry;
369 369
370 370 return entry;
371 371 }
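
A self-contained sketch of the field decoding index_get() performs, using the byte layout from the comment above. be32() here is a hypothetical stand-in for getbe32() from bitmanipulation.h.

#include <stdint.h>
#include <string.h>

static uint32_t be32(const unsigned char *p) /* stand-in for getbe32() */
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/* Decode the fixed-width fields of one 64-byte v1 entry (sketch only). */
static void decode_entry(const unsigned char *entry, uint32_t *comp_len,
                         uint32_t *uncomp_len, uint32_t *base_rev,
                         uint32_t *link_rev, uint32_t *p1, uint32_t *p2,
                         unsigned char node[20])
{
        *comp_len = be32(entry + 8);
        *uncomp_len = be32(entry + 12);
        *base_rev = be32(entry + 16);
        *link_rev = be32(entry + 20);
        *p1 = be32(entry + 24);
        *p2 = be32(entry + 28);
        memcpy(node, entry + 32, 20); /* 20 of the 32 node bytes are used */
}
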
372 372
373 373 /*
374 374 * Return the 20-byte SHA of the node corresponding to the given rev.
375 375 */
376 376 static const char *index_node(indexObject *self, Py_ssize_t pos)
377 377 {
378 378 Py_ssize_t length = index_length(self);
379 379 const char *data;
380 380
381 381 if (pos == -1)
382 382 return nullid;
383 383
384 384 if (pos >= length)
385 385 return NULL;
386 386
387 387 if (pos >= self->length) {
388 388 PyObject *tuple, *str;
389 389 tuple = PyList_GET_ITEM(self->added, pos - self->length);
390 390 str = PyTuple_GetItem(tuple, 7);
391 391 return str ? PyBytes_AS_STRING(str) : NULL;
392 392 }
393 393
394 394 data = index_deref(self, pos);
395 395 return data ? data + 32 : NULL;
396 396 }
397 397
398 398 /*
399 399 * Return the 20-byte SHA of the node corresponding to the given rev. The
400 400 * rev is assumed to exist. If not, an exception is set.
401 401 */
402 402 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
403 403 {
404 404 const char *node = index_node(self, pos);
405 405 if (node == NULL) {
406 406 PyErr_Format(PyExc_IndexError, "could not access rev %d",
407 407 (int)pos);
408 408 }
409 409 return node;
410 410 }
411 411
412 412 static int nt_insert(nodetree *self, const char *node, int rev);
413 413
414 414 static int node_check(PyObject *obj, char **node)
415 415 {
416 416 Py_ssize_t nodelen;
417 417 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
418 418 return -1;
419 419 if (nodelen == 20)
420 420 return 0;
421 421 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
422 422 return -1;
423 423 }
424 424
425 425 static PyObject *index_append(indexObject *self, PyObject *obj)
426 426 {
427 427 char *node;
428 428 Py_ssize_t len;
429 429
430 430 if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
431 431 PyErr_SetString(PyExc_TypeError, "8-tuple required");
432 432 return NULL;
433 433 }
434 434
435 435 if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
436 436 return NULL;
437 437
438 438 len = index_length(self);
439 439
440 440 if (self->added == NULL) {
441 441 self->added = PyList_New(0);
442 442 if (self->added == NULL)
443 443 return NULL;
444 444 }
445 445
446 446 if (PyList_Append(self->added, obj) == -1)
447 447 return NULL;
448 448
449 449 if (self->ntinitialized)
450 450 nt_insert(&self->nt, node, (int)len);
451 451
452 452 Py_CLEAR(self->headrevs);
453 453 Py_RETURN_NONE;
454 454 }
455 455
456 456 static PyObject *index_stats(indexObject *self)
457 457 {
458 458 PyObject *obj = PyDict_New();
459 459 PyObject *s = NULL;
460 460 PyObject *t = NULL;
461 461
462 462 if (obj == NULL)
463 463 return NULL;
464 464
465 465 #define istat(__n, __d) \
466 466 do { \
467 467 s = PyBytes_FromString(__d); \
468 468 t = PyInt_FromSsize_t(self->__n); \
469 469 if (!s || !t) \
470 470 goto bail; \
471 471 if (PyDict_SetItem(obj, s, t) == -1) \
472 472 goto bail; \
473 473 Py_CLEAR(s); \
474 474 Py_CLEAR(t); \
475 475 } while (0)
476 476
477 477 if (self->added) {
478 478 Py_ssize_t len = PyList_GET_SIZE(self->added);
479 479 s = PyBytes_FromString("index entries added");
480 480 t = PyInt_FromSsize_t(len);
481 481 if (!s || !t)
482 482 goto bail;
483 483 if (PyDict_SetItem(obj, s, t) == -1)
484 484 goto bail;
485 485 Py_CLEAR(s);
486 486 Py_CLEAR(t);
487 487 }
488 488
489 489 if (self->raw_length != self->length)
490 490 istat(raw_length, "revs on disk");
491 491 istat(length, "revs in memory");
492 492 istat(ntlookups, "node trie lookups");
493 493 istat(ntmisses, "node trie misses");
494 494 istat(ntrev, "node trie last rev scanned");
495 495 if (self->ntinitialized) {
496 496 istat(nt.capacity, "node trie capacity");
497 497 istat(nt.depth, "node trie depth");
498 498 istat(nt.length, "node trie count");
499 499 istat(nt.splits, "node trie splits");
500 500 }
501 501
502 502 #undef istat
503 503
504 504 return obj;
505 505
506 506 bail:
507 507 Py_XDECREF(obj);
508 508 Py_XDECREF(s);
509 509 Py_XDECREF(t);
510 510 return NULL;
511 511 }
512 512
513 513 /*
514 514 * When we cache a list, we want to be sure the caller can't mutate
515 515 * the cached copy.
516 516 */
517 517 static PyObject *list_copy(PyObject *list)
518 518 {
519 519 Py_ssize_t len = PyList_GET_SIZE(list);
520 520 PyObject *newlist = PyList_New(len);
521 521 Py_ssize_t i;
522 522
523 523 if (newlist == NULL)
524 524 return NULL;
525 525
526 526 for (i = 0; i < len; i++) {
527 527 PyObject *obj = PyList_GET_ITEM(list, i);
528 528 Py_INCREF(obj);
529 529 PyList_SET_ITEM(newlist, i, obj);
530 530 }
531 531
532 532 return newlist;
533 533 }
534 534
535 535 static int check_filter(PyObject *filter, Py_ssize_t arg)
536 536 {
537 537 if (filter) {
538 538 PyObject *arglist, *result;
539 539 int isfiltered;
540 540
541 541 arglist = Py_BuildValue("(n)", arg);
542 542 if (!arglist) {
543 543 return -1;
544 544 }
545 545
546 546 result = PyEval_CallObject(filter, arglist);
547 547 Py_DECREF(arglist);
548 548 if (!result) {
549 549 return -1;
550 550 }
551 551
552 552 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
553 553 * same as this function, so we can just return it directly. */
554 554 isfiltered = PyObject_IsTrue(result);
555 555 Py_DECREF(result);
556 556 return isfiltered;
557 557 } else {
558 558 return 0;
559 559 }
560 560 }
561 561
562 562 static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
563 563 Py_ssize_t marker, char *phases)
564 564 {
565 565 PyObject *iter = NULL;
566 566 PyObject *iter_item = NULL;
567 567 Py_ssize_t min_idx = index_length(self) + 2;
568 568 long iter_item_long;
569 569
570 570 if (PyList_GET_SIZE(list) != 0) {
571 571 iter = PyObject_GetIter(list);
572 572 if (iter == NULL)
573 573 return -2;
574 574 while ((iter_item = PyIter_Next(iter))) {
575 575 if (!pylong_to_long(iter_item, &iter_item_long)) {
576 576 Py_DECREF(iter_item);
577 577 return -2;
578 578 }
579 579 Py_DECREF(iter_item);
580 580 if (iter_item_long < min_idx)
581 581 min_idx = iter_item_long;
582 582 phases[iter_item_long] = (char)marker;
583 583 }
584 584 Py_DECREF(iter);
585 585 }
586 586
587 587 return min_idx;
588 588 }
589 589
590 590 static inline void set_phase_from_parents(char *phases, int parent_1,
591 591 int parent_2, Py_ssize_t i)
592 592 {
593 593 if (parent_1 >= 0 && phases[parent_1] > phases[i])
594 594 phases[i] = phases[parent_1];
595 595 if (parent_2 >= 0 && phases[parent_2] > phases[i])
596 596 phases[i] = phases[parent_2];
597 597 }
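
A small standalone example of the rule above (phase numbering used later in this file: 0 public, 1 draft, 2 secret): a child can never have a lower phase than either parent, so a draft/secret merge is secret.

#include <assert.h>

int main(void)
{
        char phases[3] = {1, 2, 0}; /* rev 0 draft, rev 1 secret, rev 2 unset */
        int parent_1 = 0, parent_2 = 1, i = 2;
        /* same propagation as set_phase_from_parents() */
        if (parent_1 >= 0 && phases[parent_1] > phases[i])
                phases[i] = phases[parent_1];
        if (parent_2 >= 0 && phases[parent_2] > phases[i])
                phases[i] = phases[parent_2];
        assert(phases[i] == 2); /* the merge of a draft and a secret is secret */
        return 0;
}
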
598 598
599 599 static PyObject *reachableroots2(indexObject *self, PyObject *args)
600 600 {
601 601
602 602 /* Input */
603 603 long minroot;
604 604 PyObject *includepatharg = NULL;
605 605 int includepath = 0;
606 606 /* heads and roots are lists */
607 607 PyObject *heads = NULL;
608 608 PyObject *roots = NULL;
609 609 PyObject *reachable = NULL;
610 610
611 611 PyObject *val;
612 612 Py_ssize_t len = index_length(self);
613 613 long revnum;
614 614 Py_ssize_t k;
615 615 Py_ssize_t i;
616 616 Py_ssize_t l;
617 617 int r;
618 618 int parents[2];
619 619
620 620 /* Internal data structure:
621 621 * tovisit: array of length len+1 (all revs + nullrev), filled up to
622 622 * lentovisit
623 623 *
624 624 * revstates: array of length len+1 (all revs + nullrev) */
625 625 int *tovisit = NULL;
626 626 long lentovisit = 0;
627 627 enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
628 628 char *revstates = NULL;
629 629
630 630 /* Get arguments */
631 631 if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
632 632 &PyList_Type, &roots, &PyBool_Type,
633 633 &includepatharg))
634 634 goto bail;
635 635
636 636 if (includepatharg == Py_True)
637 637 includepath = 1;
638 638
639 639 /* Initialize return set */
640 640 reachable = PyList_New(0);
641 641 if (reachable == NULL)
642 642 goto bail;
643 643
644 644 /* Initialize internal data structures */
645 645 tovisit = (int *)malloc((len + 1) * sizeof(int));
646 646 if (tovisit == NULL) {
647 647 PyErr_NoMemory();
648 648 goto bail;
649 649 }
650 650
651 651 revstates = (char *)calloc(len + 1, 1);
652 652 if (revstates == NULL) {
653 653 PyErr_NoMemory();
654 654 goto bail;
655 655 }
656 656
657 657 l = PyList_GET_SIZE(roots);
658 658 for (i = 0; i < l; i++) {
659 659 revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
660 660 if (revnum == -1 && PyErr_Occurred())
661 661 goto bail;
662 662 /* If root is out of range, e.g. wdir(), it must be unreachable
663 663 * from heads. So we can just ignore it. */
664 664 if (revnum + 1 < 0 || revnum + 1 >= len + 1)
665 665 continue;
666 666 revstates[revnum + 1] |= RS_ROOT;
667 667 }
668 668
669 669 /* Populate tovisit with all the heads */
670 670 l = PyList_GET_SIZE(heads);
671 671 for (i = 0; i < l; i++) {
672 672 revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
673 673 if (revnum == -1 && PyErr_Occurred())
674 674 goto bail;
675 675 if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
676 676 PyErr_SetString(PyExc_IndexError, "head out of range");
677 677 goto bail;
678 678 }
679 679 if (!(revstates[revnum + 1] & RS_SEEN)) {
680 680 tovisit[lentovisit++] = (int)revnum;
681 681 revstates[revnum + 1] |= RS_SEEN;
682 682 }
683 683 }
684 684
685 685 /* Visit the tovisit list and find the reachable roots */
686 686 k = 0;
687 687 while (k < lentovisit) {
688 688 /* Add the node to reachable if it is a root */
689 689 revnum = tovisit[k++];
690 690 if (revstates[revnum + 1] & RS_ROOT) {
691 691 revstates[revnum + 1] |= RS_REACHABLE;
692 692 val = PyInt_FromLong(revnum);
693 693 if (val == NULL)
694 694 goto bail;
695 695 r = PyList_Append(reachable, val);
696 696 Py_DECREF(val);
697 697 if (r < 0)
698 698 goto bail;
699 699 if (includepath == 0)
700 700 continue;
701 701 }
702 702
703 703 /* Add its parents to the list of nodes to visit */
704 704 if (revnum == -1)
705 705 continue;
706 706 r = index_get_parents(self, revnum, parents, (int)len - 1);
707 707 if (r < 0)
708 708 goto bail;
709 709 for (i = 0; i < 2; i++) {
710 710 if (!(revstates[parents[i] + 1] & RS_SEEN) &&
711 711 parents[i] >= minroot) {
712 712 tovisit[lentovisit++] = parents[i];
713 713 revstates[parents[i] + 1] |= RS_SEEN;
714 714 }
715 715 }
716 716 }
717 717
718 718 /* Find all the nodes in between the roots we found and the heads
719 719 * and add them to the reachable set */
720 720 if (includepath == 1) {
721 721 long minidx = minroot;
722 722 if (minidx < 0)
723 723 minidx = 0;
724 724 for (i = minidx; i < len; i++) {
725 725 if (!(revstates[i + 1] & RS_SEEN))
726 726 continue;
727 727 r = index_get_parents(self, i, parents, (int)len - 1);
728 728 /* Corrupted index file, error is set from
729 729 * index_get_parents */
730 730 if (r < 0)
731 731 goto bail;
732 732 if (((revstates[parents[0] + 1] |
733 733 revstates[parents[1] + 1]) &
734 734 RS_REACHABLE) &&
735 735 !(revstates[i + 1] & RS_REACHABLE)) {
736 736 revstates[i + 1] |= RS_REACHABLE;
737 737 val = PyInt_FromSsize_t(i);
738 738 if (val == NULL)
739 739 goto bail;
740 740 r = PyList_Append(reachable, val);
741 741 Py_DECREF(val);
742 742 if (r < 0)
743 743 goto bail;
744 744 }
745 745 }
746 746 }
747 747
748 748 free(revstates);
749 749 free(tovisit);
750 750 return reachable;
751 751 bail:
752 752 Py_XDECREF(reachable);
753 753 free(revstates);
754 754 free(tovisit);
755 755 return NULL;
756 756 }
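
A standalone sketch of the revstates bookkeeping used above: the `+ 1` shift reserves slot 0 for nullrev, so that parents[i] == -1 indexes safely.

#include <assert.h>

int main(void)
{
        enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
        char revstates[3 + 1] = {0}; /* len == 3 revs, plus a nullrev slot */
        int revnum;

        revnum = -1; /* nullrev maps to index 0 instead of underflowing */
        revstates[revnum + 1] |= RS_SEEN;

        revnum = 2; /* a root that turned out to be reachable from a head */
        revstates[revnum + 1] |= RS_ROOT | RS_SEEN | RS_REACHABLE;

        assert(revstates[0] == RS_SEEN);
        assert(revstates[3] & RS_REACHABLE);
        return 0;
}
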
757 757
758 758 static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
759 759 {
760 760 PyObject *roots = Py_None;
761 761 PyObject *ret = NULL;
762 762 PyObject *phasessize = NULL;
763 763 PyObject *phaseroots = NULL;
764 764 PyObject *phaseset = NULL;
765 765 PyObject *phasessetlist = NULL;
766 766 PyObject *rev = NULL;
767 767 Py_ssize_t len = index_length(self);
768 768 Py_ssize_t numphase = 0;
769 769 Py_ssize_t minrevallphases = 0;
770 770 Py_ssize_t minrevphase = 0;
771 771 Py_ssize_t i = 0;
772 772 char *phases = NULL;
773 773 long phase;
774 774
775 775 if (!PyArg_ParseTuple(args, "O", &roots))
776 776 goto done;
777 777 if (roots == NULL || !PyList_Check(roots)) {
778 778 PyErr_SetString(PyExc_TypeError, "roots must be a list");
779 779 goto done;
780 780 }
781 781
782 782 phases = calloc(
783 783 len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
784 784 if (phases == NULL) {
785 785 PyErr_NoMemory();
786 786 goto done;
787 787 }
788 788 /* Put the phase information of all the roots in phases */
789 789 numphase = PyList_GET_SIZE(roots) + 1;
790 790 minrevallphases = len + 1;
791 791 phasessetlist = PyList_New(numphase);
792 792 if (phasessetlist == NULL)
793 793 goto done;
794 794
795 795 PyList_SET_ITEM(phasessetlist, 0, Py_None);
796 796 Py_INCREF(Py_None);
797 797
798 798 for (i = 0; i < numphase - 1; i++) {
799 799 phaseroots = PyList_GET_ITEM(roots, i);
800 800 phaseset = PySet_New(NULL);
801 801 if (phaseset == NULL)
802 802 goto release;
803 803 PyList_SET_ITEM(phasessetlist, i + 1, phaseset);
804 804 if (!PyList_Check(phaseroots)) {
805 805 PyErr_SetString(PyExc_TypeError,
806 806 "roots item must be a list");
807 807 goto release;
808 808 }
809 809 minrevphase =
810 810 add_roots_get_min(self, phaseroots, i + 1, phases);
811 811 if (minrevphase == -2) /* Error from add_roots_get_min */
812 812 goto release;
813 813 minrevallphases = MIN(minrevallphases, minrevphase);
814 814 }
815 815 /* Propagate the phase information from the roots to the revs */
816 816 if (minrevallphases != -1) {
817 817 int parents[2];
818 818 for (i = minrevallphases; i < len; i++) {
819 819 if (index_get_parents(self, i, parents, (int)len - 1) <
820 820 0)
821 821 goto release;
822 822 set_phase_from_parents(phases, parents[0], parents[1],
823 823 i);
824 824 }
825 825 }
826 826 /* Transform phase list to a python list */
827 827 phasessize = PyInt_FromSsize_t(len);
828 828 if (phasessize == NULL)
829 829 goto release;
830 830 for (i = 0; i < len; i++) {
831 831 phase = phases[i];
832 832 /* We only store the sets of revs for non-public phases; the
833 833 * public phase is computed as a difference */
834 834 if (phase != 0) {
835 835 phaseset = PyList_GET_ITEM(phasessetlist, phase);
836 836 rev = PyInt_FromSsize_t(i);
837 837 if (rev == NULL)
838 838 goto release;
839 839 PySet_Add(phaseset, rev);
840 840 Py_XDECREF(rev);
841 841 }
842 842 }
843 843 ret = PyTuple_Pack(2, phasessize, phasessetlist);
844 844
845 845 release:
846 846 Py_XDECREF(phasessize);
847 847 Py_XDECREF(phasessetlist);
848 848 done:
849 849 free(phases);
850 850 return ret;
851 851 }
852 852
853 853 static PyObject *index_headrevs(indexObject *self, PyObject *args)
854 854 {
855 855 Py_ssize_t i, j, len;
856 856 char *nothead = NULL;
857 857 PyObject *heads = NULL;
858 858 PyObject *filter = NULL;
859 859 PyObject *filteredrevs = Py_None;
860 860
861 861 if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
862 862 return NULL;
863 863 }
864 864
865 865 if (self->headrevs && filteredrevs == self->filteredrevs)
866 866 return list_copy(self->headrevs);
867 867
868 868 Py_DECREF(self->filteredrevs);
869 869 self->filteredrevs = filteredrevs;
870 870 Py_INCREF(filteredrevs);
871 871
872 872 if (filteredrevs != Py_None) {
873 873 filter = PyObject_GetAttrString(filteredrevs, "__contains__");
874 874 if (!filter) {
875 875 PyErr_SetString(
876 876 PyExc_TypeError,
877 877 "filteredrevs has no attribute __contains__");
878 878 goto bail;
879 879 }
880 880 }
881 881
882 882 len = index_length(self);
883 883 heads = PyList_New(0);
884 884 if (heads == NULL)
885 885 goto bail;
886 886 if (len == 0) {
887 887 PyObject *nullid = PyInt_FromLong(-1);
888 888 if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
889 889 Py_XDECREF(nullid);
890 890 goto bail;
891 891 }
892 892 goto done;
893 893 }
894 894
895 895 nothead = calloc(len, 1);
896 896 if (nothead == NULL) {
897 897 PyErr_NoMemory();
898 898 goto bail;
899 899 }
900 900
901 901 for (i = len - 1; i >= 0; i--) {
902 902 int isfiltered;
903 903 int parents[2];
904 904
905 905 /* If nothead[i] == 1, it means we've seen an unfiltered child
906 906 * of this node already, and therefore this node is not
907 907 * filtered. So we can skip the expensive check_filter step.
908 908 */
909 909 if (nothead[i] != 1) {
910 910 isfiltered = check_filter(filter, i);
911 911 if (isfiltered == -1) {
912 912 PyErr_SetString(PyExc_TypeError,
913 913 "unable to check filter");
914 914 goto bail;
915 915 }
916 916
917 917 if (isfiltered) {
918 918 nothead[i] = 1;
919 919 continue;
920 920 }
921 921 }
922 922
923 923 if (index_get_parents(self, i, parents, (int)len - 1) < 0)
924 924 goto bail;
925 925 for (j = 0; j < 2; j++) {
926 926 if (parents[j] >= 0)
927 927 nothead[parents[j]] = 1;
928 928 }
929 929 }
930 930
931 931 for (i = 0; i < len; i++) {
932 932 PyObject *head;
933 933
934 934 if (nothead[i])
935 935 continue;
936 936 head = PyInt_FromSsize_t(i);
937 937 if (head == NULL || PyList_Append(heads, head) == -1) {
938 938 Py_XDECREF(head);
939 939 goto bail;
940 940 }
941 941 }
942 942
943 943 done:
944 944 self->headrevs = heads;
945 945 Py_XDECREF(filter);
946 946 free(nothead);
947 947 return list_copy(self->headrevs);
948 948 bail:
949 949 Py_XDECREF(filter);
950 950 Py_XDECREF(heads);
951 951 free(nothead);
952 952 return NULL;
953 953 }
954 954
955 955 /**
956 956 * Obtain the base revision index entry.
957 957 *
958 958 * Callers must ensure that rev >= 0 or illegal memory access may occur.
959 959 */
960 960 static inline int index_baserev(indexObject *self, int rev)
961 961 {
962 962 const char *data;
963 963
964 964 if (rev >= self->length) {
965 965 PyObject *tuple =
966 966 PyList_GET_ITEM(self->added, rev - self->length);
967 967 long ret;
968 968 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 3), &ret)) {
969 969 return -2;
970 970 }
971 971 return (int)ret;
972 972 } else {
973 973 data = index_deref(self, rev);
974 974 if (data == NULL) {
975 975 return -2;
976 976 }
977 977
978 978 return getbe32(data + 16);
979 979 }
980 980 }
981 981
982 982 static PyObject *index_deltachain(indexObject *self, PyObject *args)
983 983 {
984 984 int rev, generaldelta;
985 985 PyObject *stoparg;
986 986 int stoprev, iterrev, baserev = -1;
987 987 int stopped;
988 988 PyObject *chain = NULL, *result = NULL;
989 989 const Py_ssize_t length = index_length(self);
990 990
991 991 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
992 992 return NULL;
993 993 }
994 994
995 995 if (PyInt_Check(stoparg)) {
996 996 stoprev = (int)PyInt_AsLong(stoparg);
997 997 if (stoprev == -1 && PyErr_Occurred()) {
998 998 return NULL;
999 999 }
1000 1000 } else if (stoparg == Py_None) {
1001 1001 stoprev = -2;
1002 1002 } else {
1003 1003 PyErr_SetString(PyExc_ValueError,
1004 1004 "stoprev must be integer or None");
1005 1005 return NULL;
1006 1006 }
1007 1007
1008 1008 if (rev < 0 || rev >= length) {
1009 1009 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1010 1010 return NULL;
1011 1011 }
1012 1012
1013 1013 chain = PyList_New(0);
1014 1014 if (chain == NULL) {
1015 1015 return NULL;
1016 1016 }
1017 1017
1018 1018 baserev = index_baserev(self, rev);
1019 1019
1020 1020 /* This should never happen. */
1021 1021 if (baserev <= -2) {
1022 1022 /* Error should be set by index_deref() */
1023 1023 assert(PyErr_Occurred());
1024 1024 goto bail;
1025 1025 }
1026 1026
1027 1027 iterrev = rev;
1028 1028
1029 1029 while (iterrev != baserev && iterrev != stoprev) {
1030 1030 PyObject *value = PyInt_FromLong(iterrev);
1031 1031 if (value == NULL) {
1032 1032 goto bail;
1033 1033 }
1034 1034 if (PyList_Append(chain, value)) {
1035 1035 Py_DECREF(value);
1036 1036 goto bail;
1037 1037 }
1038 1038 Py_DECREF(value);
1039 1039
1040 1040 if (generaldelta) {
1041 1041 iterrev = baserev;
1042 1042 } else {
1043 1043 iterrev--;
1044 1044 }
1045 1045
1046 1046 if (iterrev < 0) {
1047 1047 break;
1048 1048 }
1049 1049
1050 1050 if (iterrev >= length) {
1051 1051 PyErr_SetString(PyExc_IndexError,
1052 1052 "revision outside index");
1053 1053 goto bail;
1054 1054 }
1055 1055
1056 1056 baserev = index_baserev(self, iterrev);
1057 1057
1058 1058 /* This should never happen. */
1059 1059 if (baserev <= -2) {
1060 1060 /* Error should be set by index_deref() */
1061 1061 assert(PyErr_Occurred());
1062 1062 goto bail;
1063 1063 }
1064 1064 }
1065 1065
1066 1066 if (iterrev == stoprev) {
1067 1067 stopped = 1;
1068 1068 } else {
1069 1069 PyObject *value = PyInt_FromLong(iterrev);
1070 1070 if (value == NULL) {
1071 1071 goto bail;
1072 1072 }
1073 1073 if (PyList_Append(chain, value)) {
1074 1074 Py_DECREF(value);
1075 1075 goto bail;
1076 1076 }
1077 1077 Py_DECREF(value);
1078 1078
1079 1079 stopped = 0;
1080 1080 }
1081 1081
1082 1082 if (PyList_Reverse(chain)) {
1083 1083 goto bail;
1084 1084 }
1085 1085
1086 1086 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
1087 1087 Py_DECREF(chain);
1088 1088 return result;
1089 1089
1090 1090 bail:
1091 1091 Py_DECREF(chain);
1092 1092 return NULL;
1093 1093 }
1094 1094
1095 1095 static inline int64_t
1096 1096 index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
1097 1097 {
1098 1098 int64_t start_offset;
1099 1099 int64_t end_offset;
1100 1100 int end_size;
1101 1101 start_offset = index_get_start(self, start_rev);
1102 1102 if (start_offset < 0) {
1103 1103 return -1;
1104 1104 }
1105 1105 end_offset = index_get_start(self, end_rev);
1106 1106 if (end_offset < 0) {
1107 1107 return -1;
1108 1108 }
1109 1109 end_size = index_get_length(self, end_rev);
1110 1110 if (end_size < 0) {
1111 1111 return -1;
1112 1112 }
1113 1113 if (end_offset < start_offset) {
1114 1114 PyErr_Format(PyExc_ValueError,
1115 1115 "corrupted revlog index: inconsistent offset "
1116 1116 "between revisions (%zd) and (%zd)",
1117 1117 start_rev, end_rev);
1118 1118 return -1;
1119 1119 }
1120 1120 return (end_offset - start_offset) + (int64_t)end_size;
1121 1121 }
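
Worked example with made-up numbers: if the first revision of the segment starts at byte 100, the last starts at byte 180, and the last is 20 bytes long, the segment spans (180 - 100) + 20 = 100 bytes of the data file.

#include <assert.h>
#include <stdint.h>

int main(void)
{
        int64_t start_offset = 100; /* index_get_start(start_rev) */
        int64_t end_offset = 180;   /* index_get_start(end_rev) */
        int end_size = 20;          /* index_get_length(end_rev) */
        assert((end_offset - start_offset) + (int64_t)end_size == 100);
        return 0;
}
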
1122 1122
1123 1123 /* returns endidx so that revs[startidx:endidx] has no empty trailing revs */
1124 1124 static Py_ssize_t trim_endidx(indexObject *self, const Py_ssize_t *revs,
1125 1125 Py_ssize_t startidx, Py_ssize_t endidx)
1126 1126 {
1127 1127 int length;
1128 1128 while (endidx > 1 && endidx > startidx) {
1129 1129 length = index_get_length(self, revs[endidx - 1]);
1130 1130 if (length < 0) {
1131 1131 return -1;
1132 1132 }
1133 1133 if (length != 0) {
1134 1134 break;
1135 1135 }
1136 1136 endidx -= 1;
1137 1137 }
1138 1138 return endidx;
1139 1139 }
1140 1140
1141 1141 struct Gap {
1142 1142 int64_t size;
1143 1143 Py_ssize_t idx;
1144 1144 };
1145 1145
1146 1146 static int gap_compare(const void *left, const void *right)
1147 1147 {
1148 1148 const struct Gap *l_left = ((const struct Gap *)left);
1149 1149 const struct Gap *l_right = ((const struct Gap *)right);
1150 1150 if (l_left->size < l_right->size) {
1151 1151 return -1;
1152 1152 } else if (l_left->size > l_right->size) {
1153 1153 return 1;
1154 1154 }
1155 1155 return 0;
1156 1156 }
1157 1157 static int Py_ssize_t_compare(const void *left, const void *right)
1158 1158 {
1159 1159 const Py_ssize_t l_left = *(const Py_ssize_t *)left;
1160 1160 const Py_ssize_t l_right = *(const Py_ssize_t *)right;
1161 1161 if (l_left < l_right) {
1162 1162 return -1;
1163 1163 } else if (l_left > l_right) {
1164 1164 return 1;
1165 1165 }
1166 1166 return 0;
1167 1167 }
1168 1168
1169 1169 static PyObject *index_slicechunktodensity(indexObject *self, PyObject *args)
1170 1170 {
1171 1171 /* method arguments */
1172 1172 PyObject *list_revs = NULL; /* revisions in the chain */
1173 1173 double targetdensity = 0; /* min density to achieve */
1174 1174 Py_ssize_t mingapsize = 0; /* threshold to ignore gaps */
1175 1175
1176 1176 /* other core variables */
1177 1177 Py_ssize_t idxlen = index_length(self);
1178 1178 Py_ssize_t i; /* used for various iteration */
1179 1179 PyObject *result = NULL; /* the final return of the function */
1180 1180
1181 1181 /* generic information about the delta chain being sliced */
1182 1182 Py_ssize_t num_revs = 0; /* size of the full delta chain */
1183 1183 Py_ssize_t *revs = NULL; /* native array of revision in the chain */
1184 1184 int64_t chainpayload = 0; /* sum of all delta in the chain */
1185 1185 int64_t deltachainspan = 0; /* distance from first byte to last byte */
1186 1186
1187 1187 /* variable used for slicing the delta chain */
1188 1188 int64_t readdata = 0; /* amount of data currently planned to be read */
1189 1189 double density = 0; /* ratio of payload data to data read */
1190 1190 int64_t previous_end;
1191 1191 struct Gap *gaps = NULL; /* array of notable gaps in the chain */
1192 1192 Py_ssize_t num_gaps =
1193 1193 0; /* total number of notable gaps recorded so far */
1194 1194 Py_ssize_t *selected_indices = NULL; /* indices of gap skipped over */
1195 1195 Py_ssize_t num_selected = 0; /* number of gaps skipped */
1196 1196 PyObject *chunk = NULL; /* individual slice */
1197 1197 PyObject *allchunks = NULL; /* all slices */
1198 1198 Py_ssize_t previdx;
1199 1199
1200 1200 /* parsing argument */
1201 1201 if (!PyArg_ParseTuple(args, "O!dn", &PyList_Type, &list_revs,
1202 1202 &targetdensity, &mingapsize)) {
1203 1203 goto bail;
1204 1204 }
1205 1205
1206 1206 /* If the delta chain contains a single element, we do not need slicing
1207 1207 */
1208 1208 num_revs = PyList_GET_SIZE(list_revs);
1209 1209 if (num_revs <= 1) {
1210 1210 result = PyTuple_Pack(1, list_revs);
1211 1211 goto done;
1212 1212 }
1213 1213
1214 1214 /* Turn the python list into a native integer array (for efficiency) */
1215 1215 revs = (Py_ssize_t *)calloc(num_revs, sizeof(Py_ssize_t));
1216 1216 if (revs == NULL) {
1217 1217 PyErr_NoMemory();
1218 1218 goto bail;
1219 1219 }
1220 1220 for (i = 0; i < num_revs; i++) {
1221 1221 Py_ssize_t revnum = PyInt_AsLong(PyList_GET_ITEM(list_revs, i));
1222 1222 if (revnum == -1 && PyErr_Occurred()) {
1223 1223 goto bail;
1224 1224 }
1225 1225 if (revnum < 0 || revnum >= idxlen) {
1226 1226 PyErr_Format(PyExc_IndexError,
1227 1227 "index out of range: %zd", revnum);
1228 1228 goto bail;
1229 1229 }
1230 1230 revs[i] = revnum;
1231 1231 }
1232 1232
1233 1233 /* Compute and check various properties of the unsliced delta chain */
1234 1234 deltachainspan = index_segment_span(self, revs[0], revs[num_revs - 1]);
1235 1235 if (deltachainspan < 0) {
1236 1236 goto bail;
1237 1237 }
1238 1238
1239 1239 if (deltachainspan <= mingapsize) {
1240 1240 result = PyTuple_Pack(1, list_revs);
1241 1241 goto done;
1242 1242 }
1243 1243 chainpayload = 0;
1244 1244 for (i = 0; i < num_revs; i++) {
1245 1245 int tmp = index_get_length(self, revs[i]);
1246 1246 if (tmp < 0) {
1247 1247 goto bail;
1248 1248 }
1249 1249 chainpayload += tmp;
1250 1250 }
1251 1251
1252 1252 readdata = deltachainspan;
1253 1253 density = 1.0;
1254 1254
1255 1255 if (0 < deltachainspan) {
1256 1256 density = (double)chainpayload / (double)deltachainspan;
1257 1257 }
1258 1258
1259 1259 if (density >= targetdensity) {
1260 1260 result = PyTuple_Pack(1, list_revs);
1261 1261 goto done;
1262 1262 }
1263 1263
1264 1264 /* if chain is too sparse, look for relevant gaps */
1265 1265 gaps = (struct Gap *)calloc(num_revs, sizeof(struct Gap));
1266 1266 if (gaps == NULL) {
1267 1267 PyErr_NoMemory();
1268 1268 goto bail;
1269 1269 }
1270 1270
1271 1271 previous_end = -1;
1272 1272 for (i = 0; i < num_revs; i++) {
1273 1273 int64_t revstart;
1274 1274 int revsize;
1275 1275 revstart = index_get_start(self, revs[i]);
1276 1276 if (revstart < 0) {
1277 1277 goto bail;
1278 1278 }
1279 1279 revsize = index_get_length(self, revs[i]);
1280 1280 if (revsize < 0) {
1281 1281 goto bail;
1282 1282 }
1283 1283 if (revsize == 0) {
1284 1284 continue;
1285 1285 }
1286 1286 if (previous_end >= 0) {
1287 1287 int64_t gapsize = revstart - previous_end;
1288 1288 if (gapsize > mingapsize) {
1289 1289 gaps[num_gaps].size = gapsize;
1290 1290 gaps[num_gaps].idx = i;
1291 1291 num_gaps += 1;
1292 1292 }
1293 1293 }
1294 1294 previous_end = revstart + revsize;
1295 1295 }
1296 1296 if (num_gaps == 0) {
1297 1297 result = PyTuple_Pack(1, list_revs);
1298 1298 goto done;
1299 1299 }
1300 1300 qsort(gaps, num_gaps, sizeof(struct Gap), &gap_compare);
1301 1301
1302 1302 /* Slice the largest gaps first; they improve the density the most */
1303 1303 selected_indices =
1304 1304 (Py_ssize_t *)malloc((num_gaps + 1) * sizeof(Py_ssize_t));
1305 1305 if (selected_indices == NULL) {
1306 1306 PyErr_NoMemory();
1307 1307 goto bail;
1308 1308 }
1309 1309
1310 1310 for (i = num_gaps - 1; i >= 0; i--) {
1311 1311 selected_indices[num_selected] = gaps[i].idx;
1312 1312 readdata -= gaps[i].size;
1313 1313 num_selected += 1;
1314 1314 if (readdata <= 0) {
1315 1315 density = 1.0;
1316 1316 } else {
1317 1317 density = (double)chainpayload / (double)readdata;
1318 1318 }
1319 1319 if (density >= targetdensity) {
1320 1320 break;
1321 1321 }
1322 1322 }
1323 1323 qsort(selected_indices, num_selected, sizeof(Py_ssize_t),
1324 1324 &Py_ssize_t_compare);
1325 1325
1326 1326 /* create the resulting slice */
1327 1327 allchunks = PyList_New(0);
1328 1328 if (allchunks == NULL) {
1329 1329 goto bail;
1330 1330 }
1331 1331 previdx = 0;
1332 1332 selected_indices[num_selected] = num_revs;
1333 1333 for (i = 0; i <= num_selected; i++) {
1334 1334 Py_ssize_t idx = selected_indices[i];
1335 1335 Py_ssize_t endidx = trim_endidx(self, revs, previdx, idx);
1336 1336 if (endidx < 0) {
1337 1337 goto bail;
1338 1338 }
1339 1339 if (previdx < endidx) {
1340 1340 chunk = PyList_GetSlice(list_revs, previdx, endidx);
1341 1341 if (chunk == NULL) {
1342 1342 goto bail;
1343 1343 }
1344 1344 if (PyList_Append(allchunks, chunk) == -1) {
1345 1345 goto bail;
1346 1346 }
1347 1347 Py_DECREF(chunk);
1348 1348 chunk = NULL;
1349 1349 }
1350 1350 previdx = idx;
1351 1351 }
1352 1352 result = allchunks;
1353 1353 goto done;
1354 1354
1355 1355 bail:
1356 1356 Py_XDECREF(allchunks);
1357 1357 Py_XDECREF(chunk);
1358 1358 done:
1359 1359 free(revs);
1360 1360 free(gaps);
1361 1361 free(selected_indices);
1362 1362 return result;
1363 1363 }
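
A worked, standalone example of the density rule the slicing loop above applies (all numbers made up): 60 bytes of delta payload spread over a 200-byte span gives density 0.3; skipping one 120-byte gap shrinks the planned read to 80 bytes and lifts the density to 0.75, which clears a 0.5 target.

#include <assert.h>
#include <stdint.h>

int main(void)
{
        int64_t chainpayload = 60;    /* sum of all delta sizes in the chain */
        int64_t deltachainspan = 200; /* first byte to last byte */
        double targetdensity = 0.5;

        double density = (double)chainpayload / (double)deltachainspan;
        assert(density < targetdensity); /* 0.3: too sparse, look for gaps */

        int64_t readdata = deltachainspan - 120; /* skip one 120-byte gap */
        density = (double)chainpayload / (double)readdata;
        assert(density >= targetdensity); /* 0.75: dense enough, stop */
        return 0;
}
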
1364 1364
1365 1365 static inline int nt_level(const char *node, Py_ssize_t level)
1366 1366 {
1367 1367 int v = node[level >> 1];
1368 1368 if (!(level & 1))
1369 1369 v >>= 4;
1370 1370 return v & 0xf;
1371 1371 }
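
For example (standalone, mirroring nt_level() above): if the first node byte is 0xab, level 0 selects the high nybble 0xa and level 1 the low nybble 0xb, so hex digit k of the node is nybble k.

#include <assert.h>

static int nyb(const char *node, int level) /* copy of nt_level()'s logic */
{
        int v = node[level >> 1];
        if (!(level & 1))
                v >>= 4;
        return v & 0xf;
}

int main(void)
{
        const char node[1] = {(char)0xab};
        assert(nyb(node, 0) == 0xa); /* even level: high nybble */
        assert(nyb(node, 1) == 0xb); /* odd level: low nybble */
        return 0;
}
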
1372 1372
1373 1373 /*
1374 1374 * Return values:
1375 1375 *
1376 1376 * -4: match is ambiguous (multiple candidates)
1377 1377 * -2: not found
1378 1378 * rest: valid rev
1379 1379 */
1380 1380 static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
1381 1381 int hex)
1382 1382 {
1383 1383 int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
1384 1384 int level, maxlevel, off;
1385 1385
1386 1386 if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
1387 1387 return -1;
1388 1388
1389 1389 if (hex)
1390 1390 maxlevel = nodelen > 40 ? 40 : (int)nodelen;
1391 1391 else
1392 1392 maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);
1393 1393
1394 1394 for (level = off = 0; level < maxlevel; level++) {
1395 1395 int k = getnybble(node, level);
1396 1396 nodetreenode *n = &self->nodes[off];
1397 1397 int v = n->children[k];
1398 1398
1399 1399 if (v < 0) {
1400 1400 const char *n;
1401 1401 Py_ssize_t i;
1402 1402
1403 1403 v = -(v + 2);
1404 1404 n = index_node(self->index, v);
1405 1405 if (n == NULL)
1406 1406 return -2;
1407 1407 for (i = level; i < maxlevel; i++)
1408 1408 if (getnybble(node, i) != nt_level(n, i))
1409 1409 return -2;
1410 1410 return v;
1411 1411 }
1412 1412 if (v == 0)
1413 1413 return -2;
1414 1414 off = v;
1415 1415 }
1416 1416 /* multiple matches against an ambiguous prefix */
1417 1417 return -4;
1418 1418 }
1419 1419
1420 1420 static int nt_new(nodetree *self)
1421 1421 {
1422 1422 if (self->length == self->capacity) {
1423 1423 unsigned newcapacity;
1424 1424 nodetreenode *newnodes;
1425 1425 newcapacity = self->capacity * 2;
1426 1426 if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
1427 1427 PyErr_SetString(PyExc_MemoryError,
1428 1428 "overflow in nt_new");
1429 1429 return -1;
1430 1430 }
1431 1431 newnodes =
1432 1432 realloc(self->nodes, newcapacity * sizeof(nodetreenode));
1433 1433 if (newnodes == NULL) {
1434 1434 PyErr_SetString(PyExc_MemoryError, "out of memory");
1435 1435 return -1;
1436 1436 }
1437 1437 self->capacity = newcapacity;
1438 1438 self->nodes = newnodes;
1439 1439 memset(&self->nodes[self->length], 0,
1440 1440 sizeof(nodetreenode) * (self->capacity - self->length));
1441 1441 }
1442 1442 return self->length++;
1443 1443 }
1444 1444
1445 1445 static int nt_insert(nodetree *self, const char *node, int rev)
1446 1446 {
1447 1447 int level = 0;
1448 1448 int off = 0;
1449 1449
1450 1450 while (level < 40) {
1451 1451 int k = nt_level(node, level);
1452 1452 nodetreenode *n;
1453 1453 int v;
1454 1454
1455 1455 n = &self->nodes[off];
1456 1456 v = n->children[k];
1457 1457
1458 1458 if (v == 0) {
1459 1459 n->children[k] = -rev - 2;
1460 1460 return 0;
1461 1461 }
1462 1462 if (v < 0) {
1463 1463 const char *oldnode =
1464 1464 index_node_existing(self->index, -(v + 2));
1465 1465 int noff;
1466 1466
1467 1467 if (oldnode == NULL)
1468 1468 return -1;
1469 1469 if (!memcmp(oldnode, node, 20)) {
1470 1470 n->children[k] = -rev - 2;
1471 1471 return 0;
1472 1472 }
1473 1473 noff = nt_new(self);
1474 1474 if (noff == -1)
1475 1475 return -1;
1476 1476 /* self->nodes may have been changed by realloc */
1477 1477 self->nodes[off].children[k] = noff;
1478 1478 off = noff;
1479 1479 n = &self->nodes[off];
1480 1480 n->children[nt_level(oldnode, ++level)] = v;
1481 1481 if (level > self->depth)
1482 1482 self->depth = level;
1483 1483 self->splits += 1;
1484 1484 } else {
1485 1485 level += 1;
1486 1486 off = v;
1487 1487 }
1488 1488 }
1489 1489
1490 1490 return -1;
1491 1491 }
1492 1492
1493 1493 static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
1494 1494 {
1495 1495 Py_ssize_t rev;
1496 1496 const char *node;
1497 1497 Py_ssize_t length;
1498 1498 if (!PyArg_ParseTuple(args, "n", &rev))
1499 1499 return NULL;
1500 1500 length = index_length(self->nt.index);
1501 1501 if (rev < 0 || rev >= length) {
1502 1502 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1503 1503 return NULL;
1504 1504 }
1505 1505 node = index_node_existing(self->nt.index, rev);
1506 1506 if (nt_insert(&self->nt, node, (int)rev) == -1)
1507 1507 return NULL;
1508 1508 Py_RETURN_NONE;
1509 1509 }
1510 1510
1511 1511 static int nt_delete_node(nodetree *self, const char *node)
1512 1512 {
1513 1513 /* rev==-2 happens to get encoded as 0, which is interpreted as not set
1514 1514 */
1515 1515 return nt_insert(self, node, -2);
1516 1516 }
1517 1517
1518 1518 static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
1519 1519 {
1520 1520 /* Initialize before overflow-checking to avoid nt_dealloc() crash. */
1521 1521 self->nodes = NULL;
1522 1522
1523 1523 self->index = index;
1524 1524 /* The input capacity is in terms of revisions, while the field is in
1525 1525 * terms of nodetree nodes. */
1526 1526 self->capacity = (capacity < 4 ? 4 : capacity / 2);
1527 1527 self->depth = 0;
1528 1528 self->splits = 0;
1529 1529 if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
1530 1530 PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
1531 1531 return -1;
1532 1532 }
1533 1533 self->nodes = calloc(self->capacity, sizeof(nodetreenode));
1534 1534 if (self->nodes == NULL) {
1535 1535 PyErr_NoMemory();
1536 1536 return -1;
1537 1537 }
1538 1538 self->length = 1;
1539 1539 return 0;
1540 1540 }
1541 1541
1542 1542 static int ntobj_init(nodetreeObject *self, PyObject *args)
1543 1543 {
1544 1544 PyObject *index;
1545 1545 unsigned capacity;
1546 1546 if (!PyArg_ParseTuple(args, "O!I", &HgRevlogIndex_Type, &index,
1547 1547 &capacity))
1548 1548 return -1;
1549 1549 Py_INCREF(index);
1550 1550 return nt_init(&self->nt, (indexObject *)index, capacity);
1551 1551 }
1552 1552
1553 1553 static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
1554 1554 {
1555 1555 return nt_find(self, node, nodelen, 1);
1556 1556 }
1557 1557
1558 1558 /*
1559 1559 * Find the length of the shortest unique prefix of node.
1560 1560 *
1561 1561 * Return values:
1562 1562 *
1563 1563 * -3: error (exception set)
1564 1564 * -2: not found (no exception set)
1565 1565 * rest: length of shortest prefix
1566 1566 */
1567 1567 static int nt_shortest(nodetree *self, const char *node)
1568 1568 {
1569 1569 int level, off;
1570 1570
1571 1571 for (level = off = 0; level < 40; level++) {
1572 1572 int k, v;
1573 1573 nodetreenode *n = &self->nodes[off];
1574 1574 k = nt_level(node, level);
1575 1575 v = n->children[k];
1576 1576 if (v < 0) {
1577 1577 const char *n;
1578 1578 v = -(v + 2);
1579 1579 n = index_node_existing(self->index, v);
1580 1580 if (n == NULL)
1581 1581 return -3;
1582 1582 if (memcmp(node, n, 20) != 0)
1583 1583 /*
1584 1584 * Found a unique prefix, but it wasn't for the
1585 1585 * requested node (i.e the requested node does
1586 1586 * not exist).
1587 1587 */
1588 1588 return -2;
1589 1589 return level + 1;
1590 1590 }
1591 1591 if (v == 0)
1592 1592 return -2;
1593 1593 off = v;
1594 1594 }
1595 1595 /*
1596 1596 * The node was still not unique after 40 hex digits, so this won't
1597 1597 * happen. Also, if we get here, then there's a programming error in
1598 1598 * this file that made us insert a node longer than 40 hex digits.
1599 1599 */
1600 1600 PyErr_SetString(PyExc_Exception, "broken node tree");
1601 1601 return -3;
1602 1602 }
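
A standalone illustration of the result (hypothetical nodes): for two nodes that first differ at hex digit 2, digit levels 0 and 1 are shared, so the shortest unique prefix has length 2 + 1 = 3.

#include <assert.h>

static int nyb(const unsigned char *node, int level)
{
        int v = node[level >> 1];
        if (!(level & 1))
                v >>= 4;
        return v & 0xf;
}

int main(void)
{
        unsigned char a[20] = {0x12, 0x30}; /* "1230..." */
        unsigned char b[20] = {0x12, 0x40}; /* "1240..." */
        int level = 0;
        while (nyb(a, level) == nyb(b, level))
                level++;
        assert(level == 2);     /* first differing hex digit */
        assert(level + 1 == 3); /* "123" uniquely identifies a among {a, b} */
        return 0;
}
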
1603 1603
1604 1604 static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
1605 1605 {
1606 1606 PyObject *val;
1607 1607 char *node;
1608 1608 int length;
1609 1609
1610 1610 if (!PyArg_ParseTuple(args, "O", &val))
1611 1611 return NULL;
1612 1612 if (node_check(val, &node) == -1)
1613 1613 return NULL;
1614 1614
1615 1615 length = nt_shortest(&self->nt, node);
1616 1616 if (length == -3)
1617 1617 return NULL;
1618 1618 if (length == -2) {
1619 1619 raise_revlog_error();
1620 1620 return NULL;
1621 1621 }
1622 1622 return PyInt_FromLong(length);
1623 1623 }
1624 1624
1625 1625 static void nt_dealloc(nodetree *self)
1626 1626 {
1627 1627 free(self->nodes);
1628 1628 self->nodes = NULL;
1629 1629 }
1630 1630
1631 1631 static void ntobj_dealloc(nodetreeObject *self)
1632 1632 {
1633 1633 Py_XDECREF(self->nt.index);
1634 1634 nt_dealloc(&self->nt);
1635 1635 PyObject_Del(self);
1636 1636 }
1637 1637
1638 1638 static PyMethodDef ntobj_methods[] = {
1639 1639 {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
1640 1640 "insert an index entry"},
1641 1641 {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
1642 1642 "find length of shortest hex nodeid of a binary ID"},
1643 1643 {NULL} /* Sentinel */
1644 1644 };
1645 1645
1646 1646 static PyTypeObject nodetreeType = {
1647 1647 PyVarObject_HEAD_INIT(NULL, 0) /* header */
1648 1648 "parsers.nodetree", /* tp_name */
1649 1649 sizeof(nodetreeObject), /* tp_basicsize */
1650 1650 0, /* tp_itemsize */
1651 1651 (destructor)ntobj_dealloc, /* tp_dealloc */
1652 1652 0, /* tp_print */
1653 1653 0, /* tp_getattr */
1654 1654 0, /* tp_setattr */
1655 1655 0, /* tp_compare */
1656 1656 0, /* tp_repr */
1657 1657 0, /* tp_as_number */
1658 1658 0, /* tp_as_sequence */
1659 1659 0, /* tp_as_mapping */
1660 1660 0, /* tp_hash */
1661 1661 0, /* tp_call */
1662 1662 0, /* tp_str */
1663 1663 0, /* tp_getattro */
1664 1664 0, /* tp_setattro */
1665 1665 0, /* tp_as_buffer */
1666 1666 Py_TPFLAGS_DEFAULT, /* tp_flags */
1667 1667 "nodetree", /* tp_doc */
1668 1668 0, /* tp_traverse */
1669 1669 0, /* tp_clear */
1670 1670 0, /* tp_richcompare */
1671 1671 0, /* tp_weaklistoffset */
1672 1672 0, /* tp_iter */
1673 1673 0, /* tp_iternext */
1674 1674 ntobj_methods, /* tp_methods */
1675 1675 0, /* tp_members */
1676 1676 0, /* tp_getset */
1677 1677 0, /* tp_base */
1678 1678 0, /* tp_dict */
1679 1679 0, /* tp_descr_get */
1680 1680 0, /* tp_descr_set */
1681 1681 0, /* tp_dictoffset */
1682 1682 (initproc)ntobj_init, /* tp_init */
1683 1683 0, /* tp_alloc */
1684 1684 };
1685 1685
1686 1686 static int index_init_nt(indexObject *self)
1687 1687 {
1688 1688 if (!self->ntinitialized) {
1689 1689 if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
1690 1690 nt_dealloc(&self->nt);
1691 1691 return -1;
1692 1692 }
1693 1693 if (nt_insert(&self->nt, nullid, -1) == -1) {
1694 1694 nt_dealloc(&self->nt);
1695 1695 return -1;
1696 1696 }
1697 1697 self->ntinitialized = 1;
1698 1698 self->ntrev = (int)index_length(self);
1699 1699 self->ntlookups = 1;
1700 1700 self->ntmisses = 0;
1701 1701 }
1702 1702 return 0;
1703 1703 }
1704 1704
1705 1705 /*
1706 1706 * Return values:
1707 1707 *
1708 1708 * -3: error (exception set)
1709 1709 * -2: not found (no exception set)
1710 1710 * rest: valid rev
1711 1711 */
1712 1712 static int index_find_node(indexObject *self, const char *node,
1713 1713 Py_ssize_t nodelen)
1714 1714 {
1715 1715 int rev;
1716 1716
1717 1717 if (index_init_nt(self) == -1)
1718 1718 return -3;
1719 1719
1720 1720 self->ntlookups++;
1721 1721 rev = nt_find(&self->nt, node, nodelen, 0);
1722 1722 if (rev >= -1)
1723 1723 return rev;
1724 1724
1725 1725 /*
1726 1726 * For the first handful of lookups, we scan the entire index,
1727 1727 * and cache only the matching nodes. This optimizes for cases
1728 1728 * like "hg tip", where only a few nodes are accessed.
1729 1729 *
1730 1730 * After that, we cache every node we visit, using a single
1731 1731 * scan amortized over multiple lookups. This gives the best
1732 1732 * bulk performance, e.g. for "hg log".
1733 1733 */
1734 1734 if (self->ntmisses++ < 4) {
1735 1735 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1736 1736 const char *n = index_node_existing(self, rev);
1737 1737 if (n == NULL)
1738 1738 return -3;
1739 1739 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1740 1740 if (nt_insert(&self->nt, n, rev) == -1)
1741 1741 return -3;
1742 1742 break;
1743 1743 }
1744 1744 }
1745 1745 } else {
1746 1746 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1747 1747 const char *n = index_node_existing(self, rev);
1748 1748 if (n == NULL)
1749 1749 return -3;
1750 1750 if (nt_insert(&self->nt, n, rev) == -1) {
1751 1751 self->ntrev = rev + 1;
1752 1752 return -3;
1753 1753 }
1754 1754 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1755 1755 break;
1756 1756 }
1757 1757 }
1758 1758 self->ntrev = rev;
1759 1759 }
1760 1760
1761 1761 if (rev >= 0)
1762 1762 return rev;
1763 1763 return -2;
1764 1764 }
1765 1765
1766 1766 static PyObject *index_getitem(indexObject *self, PyObject *value)
1767 1767 {
1768 1768 char *node;
1769 1769 int rev;
1770 1770
1771 1771 if (PyInt_Check(value)) {
1772 1772 long idx;
1773 1773 if (!pylong_to_long(value, &idx)) {
1774 1774 return NULL;
1775 1775 }
1776 1776 return index_get(self, idx);
1777 1777 }
1778 1778
1779 1779 if (node_check(value, &node) == -1)
1780 1780 return NULL;
1781 1781 rev = index_find_node(self, node, 20);
1782 1782 if (rev >= -1)
1783 1783 return PyInt_FromLong(rev);
1784 1784 if (rev == -2)
1785 1785 raise_revlog_error();
1786 1786 return NULL;
1787 1787 }
1788 1788
1789 1789 /*
1790 1790 * Fully populate the radix tree.
1791 1791 */
1792 1792 static int index_populate_nt(indexObject *self)
1793 1793 {
1794 1794 int rev;
1795 1795 if (self->ntrev > 0) {
1796 1796 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1797 1797 const char *n = index_node_existing(self, rev);
1798 1798 if (n == NULL)
1799 1799 return -1;
1800 1800 if (nt_insert(&self->nt, n, rev) == -1)
1801 1801 return -1;
1802 1802 }
1803 1803 self->ntrev = -1;
1804 1804 }
1805 1805 return 0;
1806 1806 }
1807 1807
1808 1808 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
1809 1809 {
1810 1810 const char *fullnode;
1811 1811 int nodelen;
1812 1812 char *node;
1813 1813 int rev, i;
1814 1814
1815 1815 if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
1816 1816 return NULL;
1817 1817
1818 1818 if (nodelen < 1) {
1819 1819 PyErr_SetString(PyExc_ValueError, "key too short");
1820 1820 return NULL;
1821 1821 }
1822 1822
1823 1823 if (nodelen > 40) {
1824 1824 PyErr_SetString(PyExc_ValueError, "key too long");
1825 1825 return NULL;
1826 1826 }
1827 1827
1828 1828 for (i = 0; i < nodelen; i++)
1829 1829 hexdigit(node, i);
1830 1830 if (PyErr_Occurred()) {
1831 1831 /* input contains non-hex characters */
1832 1832 PyErr_Clear();
1833 1833 Py_RETURN_NONE;
1834 1834 }
1835 1835
1836 1836 if (index_init_nt(self) == -1)
1837 1837 return NULL;
1838 1838 if (index_populate_nt(self) == -1)
1839 1839 return NULL;
1840 1840 rev = nt_partialmatch(&self->nt, node, nodelen);
1841 1841
1842 1842 switch (rev) {
1843 1843 case -4:
1844 1844 raise_revlog_error();
1845 1845 return NULL;
1846 1846 case -2:
1847 1847 Py_RETURN_NONE;
1848 1848 case -1:
1849 1849 return PyBytes_FromStringAndSize(nullid, 20);
1850 1850 }
1851 1851
1852 1852 fullnode = index_node_existing(self, rev);
1853 1853 if (fullnode == NULL) {
1854 1854 return NULL;
1855 1855 }
1856 1856 return PyBytes_FromStringAndSize(fullnode, 20);
1857 1857 }
1858 1858
1859 1859 static PyObject *index_shortest(indexObject *self, PyObject *args)
1860 1860 {
1861 1861 PyObject *val;
1862 1862 char *node;
1863 1863 int length;
1864 1864
1865 1865 if (!PyArg_ParseTuple(args, "O", &val))
1866 1866 return NULL;
1867 1867 if (node_check(val, &node) == -1)
1868 1868 return NULL;
1869 1869
1870 1870 self->ntlookups++;
1871 1871 if (index_init_nt(self) == -1)
1872 1872 return NULL;
1873 1873 if (index_populate_nt(self) == -1)
1874 1874 return NULL;
1875 1875 length = nt_shortest(&self->nt, node);
1876 1876 if (length == -3)
1877 1877 return NULL;
1878 1878 if (length == -2) {
1879 1879 raise_revlog_error();
1880 1880 return NULL;
1881 1881 }
1882 1882 return PyInt_FromLong(length);
1883 1883 }
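partialmatch and shortest are the two hex-prefix entry points, and both force full trie population before querying it. A hedged embedding sketch: `idx` is assumed to be a parsers.index instance, `node` a 20-byte bytes object, and PY23() is the py2/py3 format helper already used in this file; error handling is elided:

#include <Python.h>

static void prefix_demo(PyObject *idx, PyObject *node)
{
	/* hex prefix -> full binary node; None when nothing matches,
	 * RevlogError when the prefix is ambiguous */
	PyObject *full = PyObject_CallMethod(idx, "partialmatch",
	                                     PY23("s", "y"), "1de2");
	/* binary node -> length of its shortest unambiguous hex prefix */
	PyObject *len = PyObject_CallMethod(idx, "shortest", "O", node);

	Py_XDECREF(full);
	Py_XDECREF(len);
}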
1884 1884
1885 1885 static PyObject *index_m_get(indexObject *self, PyObject *args)
1886 1886 {
1887 1887 PyObject *val;
1888 1888 char *node;
1889 1889 int rev;
1890 1890
1891 1891 if (!PyArg_ParseTuple(args, "O", &val))
1892 1892 return NULL;
1893 1893 if (node_check(val, &node) == -1)
1894 1894 return NULL;
1895 1895 rev = index_find_node(self, node, 20);
1896 1896 if (rev == -3)
1897 1897 return NULL;
1898 1898 if (rev == -2)
1899 1899 Py_RETURN_NONE;
1900 1900 return PyInt_FromLong(rev);
1901 1901 }
1902 1902
1903 1903 static int index_contains(indexObject *self, PyObject *value)
1904 1904 {
1905 1905 char *node;
1906 1906
1907 1907 if (PyInt_Check(value)) {
1908 1908 long rev;
1909 1909 if (!pylong_to_long(value, &rev)) {
1910 1910 return -1;
1911 1911 }
1912 1912 return rev >= -1 && rev < index_length(self);
1913 1913 }
1914 1914
1915 1915 if (node_check(value, &node) == -1)
1916 1916 return -1;
1917 1917
1918 1918 switch (index_find_node(self, node, 20)) {
1919 1919 case -3:
1920 1920 return -1;
1921 1921 case -2:
1922 1922 return 0;
1923 1923 default:
1924 1924 return 1;
1925 1925 }
1926 1926 }
1927 1927
1928 1928 typedef uint64_t bitmask;
1929 1929
1930 1930 /*
1931 1931 * Given a disjoint set of revs, return all candidates for the
1932 1932 * greatest common ancestor. In revset notation, this is the set
1933 1933 * "heads(::a and ::b and ...)"
1934 1934 */
1935 1935 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
1936 1936 int revcount)
1937 1937 {
1938 1938 const bitmask allseen = (1ull << revcount) - 1;
1939 1939 const bitmask poison = 1ull << revcount;
1940 1940 PyObject *gca = PyList_New(0);
1941 1941 int i, v, interesting;
1942 1942 int maxrev = -1;
1943 1943 bitmask sp;
1944 1944 bitmask *seen;
1945 1945
1946 1946 if (gca == NULL)
1947 1947 return PyErr_NoMemory();
1948 1948
1949 1949 for (i = 0; i < revcount; i++) {
1950 1950 if (revs[i] > maxrev)
1951 1951 maxrev = revs[i];
1952 1952 }
1953 1953
1954 1954 seen = calloc(sizeof(*seen), maxrev + 1);
1955 1955 if (seen == NULL) {
1956 1956 Py_DECREF(gca);
1957 1957 return PyErr_NoMemory();
1958 1958 }
1959 1959
1960 1960 for (i = 0; i < revcount; i++)
1961 1961 seen[revs[i]] = 1ull << i;
1962 1962
1963 1963 interesting = revcount;
1964 1964
1965 1965 for (v = maxrev; v >= 0 && interesting; v--) {
1966 1966 bitmask sv = seen[v];
1967 1967 int parents[2];
1968 1968
1969 1969 if (!sv)
1970 1970 continue;
1971 1971
1972 1972 if (sv < poison) {
1973 1973 interesting -= 1;
1974 1974 if (sv == allseen) {
1975 1975 PyObject *obj = PyInt_FromLong(v);
1976 1976 if (obj == NULL)
1977 1977 goto bail;
1978 1978 if (PyList_Append(gca, obj) == -1) {
1979 1979 Py_DECREF(obj);
1980 1980 goto bail;
1981 1981 }
1982 1982 sv |= poison;
1983 1983 for (i = 0; i < revcount; i++) {
1984 1984 if (revs[i] == v)
1985 1985 goto done;
1986 1986 }
1987 1987 }
1988 1988 }
1989 1989 if (index_get_parents(self, v, parents, maxrev) < 0)
1990 1990 goto bail;
1991 1991
1992 1992 for (i = 0; i < 2; i++) {
1993 1993 int p = parents[i];
1994 1994 if (p == -1)
1995 1995 continue;
1996 1996 sp = seen[p];
1997 1997 if (sv < poison) {
1998 1998 if (sp == 0) {
1999 1999 seen[p] = sv;
2000 2000 interesting++;
2001 2001 } else if (sp != sv)
2002 2002 seen[p] |= sv;
2003 2003 } else {
2004 2004 if (sp && sp < poison)
2005 2005 interesting--;
2006 2006 seen[p] = sv;
2007 2007 }
2008 2008 }
2009 2009 }
2010 2010
2011 2011 done:
2012 2012 free(seen);
2013 2013 return gca;
2014 2014 bail:
2015 2015 free(seen);
2016 2016 Py_XDECREF(gca);
2017 2017 return NULL;
2018 2018 }
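The bitmask scheme is easiest to see on a toy graph: each input rev owns one bit, the sweep runs from the highest rev down OR-ing each mask into the parents, and a rev whose mask covers all inputs is a candidate whose ancestors are then poisoned so only heads survive. A self-contained sketch (hand-rolled 4-rev DAG, simplified poison handling; not Mercurial code):

#include <stdio.h>

int main(void)
{
	/* toy DAG: parents[v][0..1], -1 = none; revs 2 and 3 both
	 * descend from rev 1 */
	int parents[4][2] = {{-1, -1}, {0, -1}, {1, -1}, {1, -1}};
	int revs[2] = {2, 3}, maxrev = 3;
	unsigned seen[4] = {0};
	unsigned allseen = (1u << 2) - 1, poison = 1u << 2;
	int v, i;

	for (i = 0; i < 2; i++)
		seen[revs[i]] = 1u << i;
	for (v = maxrev; v >= 0; v--) {
		unsigned sv = seen[v];
		if (!sv)
			continue;
		if (sv < poison && sv == allseen) {
			printf("gca candidate: %d\n", v); /* prints 1 */
			sv |= poison; /* exclude ancestors of a winner */
		}
		for (i = 0; i < 2; i++)
			if (parents[v][i] != -1)
				seen[parents[v][i]] |= sv;
	}
	return 0;
}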
2019 2019
2020 2020 /*
2021 2021 * Given a disjoint set of revs, return the subset with the longest
2022 2022 * path to the root.
2023 2023 */
2024 2024 static PyObject *find_deepest(indexObject *self, PyObject *revs)
2025 2025 {
2026 2026 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
2027 2027 static const Py_ssize_t capacity = 24;
2028 2028 int *depth, *interesting = NULL;
2029 2029 int i, j, v, ninteresting;
2030 2030 PyObject *dict = NULL, *keys = NULL;
2031 2031 long *seen = NULL;
2032 2032 int maxrev = -1;
2033 2033 long final;
2034 2034
2035 2035 if (revcount > capacity) {
2036 2036 PyErr_Format(PyExc_OverflowError,
2037 2037 "bitset size (%ld) > capacity (%ld)",
2038 2038 (long)revcount, (long)capacity);
2039 2039 return NULL;
2040 2040 }
2041 2041
2042 2042 for (i = 0; i < revcount; i++) {
2043 2043 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2044 2044 if (n > maxrev)
2045 2045 maxrev = n;
2046 2046 }
2047 2047
2048 2048 depth = calloc(sizeof(*depth), maxrev + 1);
2049 2049 if (depth == NULL)
2050 2050 return PyErr_NoMemory();
2051 2051
2052 2052 seen = calloc(sizeof(*seen), maxrev + 1);
2053 2053 if (seen == NULL) {
2054 2054 PyErr_NoMemory();
2055 2055 goto bail;
2056 2056 }
2057 2057
2058 2058 interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
2059 2059 if (interesting == NULL) {
2060 2060 PyErr_NoMemory();
2061 2061 goto bail;
2062 2062 }
2063 2063
2064 2064 if (PyList_Sort(revs) == -1)
2065 2065 goto bail;
2066 2066
2067 2067 for (i = 0; i < revcount; i++) {
2068 2068 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2069 2069 long b = 1l << i;
2070 2070 depth[n] = 1;
2071 2071 seen[n] = b;
2072 2072 interesting[b] = 1;
2073 2073 }
2074 2074
2075 2075 /* invariant: ninteresting is the number of non-zero entries in
2076 2076 * interesting. */
2077 2077 ninteresting = (int)revcount;
2078 2078
2079 2079 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
2080 2080 int dv = depth[v];
2081 2081 int parents[2];
2082 2082 long sv;
2083 2083
2084 2084 if (dv == 0)
2085 2085 continue;
2086 2086
2087 2087 sv = seen[v];
2088 2088 if (index_get_parents(self, v, parents, maxrev) < 0)
2089 2089 goto bail;
2090 2090
2091 2091 for (i = 0; i < 2; i++) {
2092 2092 int p = parents[i];
2093 2093 long sp;
2094 2094 int dp;
2095 2095
2096 2096 if (p == -1)
2097 2097 continue;
2098 2098
2099 2099 dp = depth[p];
2100 2100 sp = seen[p];
2101 2101 if (dp <= dv) {
2102 2102 depth[p] = dv + 1;
2103 2103 if (sp != sv) {
2104 2104 interesting[sv] += 1;
2105 2105 seen[p] = sv;
2106 2106 if (sp) {
2107 2107 interesting[sp] -= 1;
2108 2108 if (interesting[sp] == 0)
2109 2109 ninteresting -= 1;
2110 2110 }
2111 2111 }
2112 2112 } else if (dv == dp - 1) {
2113 2113 long nsp = sp | sv;
2114 2114 if (nsp == sp)
2115 2115 continue;
2116 2116 seen[p] = nsp;
2117 2117 interesting[sp] -= 1;
2118 2118 if (interesting[sp] == 0)
2119 2119 ninteresting -= 1;
2120 2120 if (interesting[nsp] == 0)
2121 2121 ninteresting += 1;
2122 2122 interesting[nsp] += 1;
2123 2123 }
2124 2124 }
2125 2125 interesting[sv] -= 1;
2126 2126 if (interesting[sv] == 0)
2127 2127 ninteresting -= 1;
2128 2128 }
2129 2129
2130 2130 final = 0;
2131 2131 j = ninteresting;
2132 2132 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
2133 2133 if (interesting[i] == 0)
2134 2134 continue;
2135 2135 final |= i;
2136 2136 j -= 1;
2137 2137 }
2138 2138 if (final == 0) {
2139 2139 keys = PyList_New(0);
2140 2140 goto bail;
2141 2141 }
2142 2142
2143 2143 dict = PyDict_New();
2144 2144 if (dict == NULL)
2145 2145 goto bail;
2146 2146
2147 2147 for (i = 0; i < revcount; i++) {
2148 2148 PyObject *key;
2149 2149
2150 2150 if ((final & (1 << i)) == 0)
2151 2151 continue;
2152 2152
2153 2153 key = PyList_GET_ITEM(revs, i);
2154 2154 Py_INCREF(key);
2155 2155 Py_INCREF(Py_None);
2156 2156 if (PyDict_SetItem(dict, key, Py_None) == -1) {
2157 2157 Py_DECREF(key);
2158 2158 Py_DECREF(Py_None);
2159 2159 goto bail;
2160 2160 }
2161 2161 }
2162 2162
2163 2163 keys = PyDict_Keys(dict);
2164 2164
2165 2165 bail:
2166 2166 free(depth);
2167 2167 free(seen);
2168 2168 free(interesting);
2169 2169 Py_XDECREF(dict);
2170 2170
2171 2171 return keys;
2172 2172 }
2173 2173
2174 2174 /*
2175 2175 * Given a (possibly overlapping) set of revs, return the heads of
2176 2176 * their common ancestors: heads(::args[0] and ::args[1] and ...)
2177 2177 */
2178 2178 static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
2179 2179 {
2180 2180 PyObject *ret = NULL;
2181 2181 Py_ssize_t argcount, i, len;
2182 2182 bitmask repeat = 0;
2183 2183 int revcount = 0;
2184 2184 int *revs;
2185 2185
2186 2186 argcount = PySequence_Length(args);
2187 2187 revs = PyMem_Malloc(argcount * sizeof(*revs));
2188 2188 if (argcount > 0 && revs == NULL)
2189 2189 return PyErr_NoMemory();
2190 2190 len = index_length(self);
2191 2191
2192 2192 for (i = 0; i < argcount; i++) {
2193 2193 static const int capacity = 24;
2194 2194 PyObject *obj = PySequence_GetItem(args, i);
2195 2195 bitmask x;
2196 2196 long val;
2197 2197
2198 2198 if (!PyInt_Check(obj)) {
2199 2199 PyErr_SetString(PyExc_TypeError,
2200 2200 "arguments must all be ints");
2201 2201 Py_DECREF(obj);
2202 2202 goto bail;
2203 2203 }
2204 2204 val = PyInt_AsLong(obj);
2205 2205 Py_DECREF(obj);
2206 2206 if (val == -1) {
2207 2207 ret = PyList_New(0);
2208 2208 goto done;
2209 2209 }
2210 2210 if (val < 0 || val >= len) {
2211 2211 PyErr_SetString(PyExc_IndexError, "index out of range");
2212 2212 goto bail;
2213 2213 }
2214 2214 /* this cheesy bloom filter lets us avoid some more
2215 2215 * expensive duplicate checks in the common set-is-disjoint
2216 2216 * case */
2217 2217 x = 1ull << (val & 0x3f);
2218 2218 if (repeat & x) {
2219 2219 int k;
2220 2220 for (k = 0; k < revcount; k++) {
2221 2221 if (val == revs[k])
2222 2222 goto duplicate;
2223 2223 }
2224 2224 } else
2225 2225 repeat |= x;
2226 2226 if (revcount >= capacity) {
2227 2227 PyErr_Format(PyExc_OverflowError,
2228 2228 "bitset size (%d) > capacity (%d)",
2229 2229 revcount, capacity);
2230 2230 goto bail;
2231 2231 }
2232 2232 revs[revcount++] = (int)val;
2233 2233 duplicate:;
2234 2234 }
2235 2235
2236 2236 if (revcount == 0) {
2237 2237 ret = PyList_New(0);
2238 2238 goto done;
2239 2239 }
2240 2240 if (revcount == 1) {
2241 2241 PyObject *obj;
2242 2242 ret = PyList_New(1);
2243 2243 if (ret == NULL)
2244 2244 goto bail;
2245 2245 obj = PyInt_FromLong(revs[0]);
2246 2246 if (obj == NULL)
2247 2247 goto bail;
2248 2248 PyList_SET_ITEM(ret, 0, obj);
2249 2249 goto done;
2250 2250 }
2251 2251
2252 2252 ret = find_gca_candidates(self, revs, revcount);
2253 2253 if (ret == NULL)
2254 2254 goto bail;
2255 2255
2256 2256 done:
2257 2257 PyMem_Free(revs);
2258 2258 return ret;
2259 2259
2260 2260 bail:
2261 2261 PyMem_Free(revs);
2262 2262 Py_XDECREF(ret);
2263 2263 return NULL;
2264 2264 }
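The "cheesy bloom filter" in the loop above is worth isolating: each value maps to one of 64 bits via val & 0x3f, and only when that bit is already occupied does the code pay for an exact scan of the values collected so far. A self-contained toy demonstrating both a false positive and a true duplicate:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	long vals[] = {5, 70, 69, 69}, out[4];
	uint64_t repeat = 0;
	int n = 0, i, k;

	for (i = 0; i < 4; i++) {
		uint64_t x = 1ull << (vals[i] & 0x3f);
		if (repeat & x) { /* maybe a duplicate: verify exactly */
			for (k = 0; k < n; k++)
				if (out[k] == vals[i])
					goto duplicate;
		}
		repeat |= x;
		out[n++] = vals[i];
	duplicate:;
	}
	/* 5 and 69 collide on bit 5 (false positive, kept after the
	 * scan); the second 69 is a true duplicate and is dropped */
	printf("%d unique\n", n); /* 3 */
	return 0;
}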
2265 2265
2266 2266 /*
2267 2267 * Given a (possibly overlapping) set of revs, return the greatest
2268 2268 * common ancestors: those with the longest path to the root.
2269 2269 */
2270 2270 static PyObject *index_ancestors(indexObject *self, PyObject *args)
2271 2271 {
2272 2272 PyObject *ret;
2273 2273 PyObject *gca = index_commonancestorsheads(self, args);
2274 2274 if (gca == NULL)
2275 2275 return NULL;
2276 2276
2277 2277 if (PyList_GET_SIZE(gca) <= 1) {
2278 2278 return gca;
2279 2279 }
2280 2280
2281 2281 ret = find_deepest(self, gca);
2282 2282 Py_DECREF(gca);
2283 2283 return ret;
2284 2284 }
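So ancestors() is literally commonancestorsheads() followed by the deepest-selection pass. A hedged embedding sketch of both entry points; `idx` is assumed to be a parsers.index instance and the GIL held:

#include <Python.h>

static PyObject *gca_demo(PyObject *idx)
{
	/* heads(::12 and ::30): every candidate head */
	PyObject *heads = PyObject_CallMethod(idx, "commonancestorsheads",
	                                      "ii", 12, 30);
	Py_XDECREF(heads);
	/* same candidates narrowed to those deepest from the root */
	return PyObject_CallMethod(idx, "ancestors", "ii", 12, 30);
}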
2285 2285
2286 2286 /*
2287 2287 * Invalidate any trie entries introduced by added revs.
2288 2288 */
2289 2289 static void index_invalidate_added(indexObject *self, Py_ssize_t start)
2290 2290 {
2291 2291 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
2292 2292
2293 2293 for (i = start; i < len; i++) {
2294 2294 PyObject *tuple = PyList_GET_ITEM(self->added, i);
2295 2295 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
2296 2296
2297 2297 nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
2298 2298 }
2299 2299
2300 2300 if (start == 0)
2301 2301 Py_CLEAR(self->added);
2302 2302 }
2303 2303
2304 2304 /*
2305 2305 * Delete a numeric range of revs from the index; the range must be
2306 2306 * at the end of the index and must exclude the sentinel nullid entry.
2307 2307 */
2308 2308 static int index_slice_del(indexObject *self, PyObject *item)
2309 2309 {
2310 2310 Py_ssize_t start, stop, step, slicelength;
2311 2311 Py_ssize_t length = index_length(self) + 1;
2312 2312 int ret = 0;
2313 2313
2314 2314 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
2315 2315 #ifdef IS_PY3K
2316 2316 if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
2317 2317 &slicelength) < 0)
2318 2318 #else
2319 2319 if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
2320 2320 &step, &slicelength) < 0)
2321 2321 #endif
2322 2322 return -1;
2323 2323
2324 2324 if (slicelength <= 0)
2325 2325 return 0;
2326 2326
2327 2327 if ((step < 0 && start < stop) || (step > 0 && start > stop))
2328 2328 stop = start;
2329 2329
2330 2330 if (step < 0) {
2331 2331 stop = start + 1;
2332 2332 start = stop + step * (slicelength - 1) - 1;
2333 2333 step = -step;
2334 2334 }
2335 2335
2336 2336 if (step != 1) {
2337 2337 PyErr_SetString(PyExc_ValueError,
2338 2338 "revlog index delete requires step size of 1");
2339 2339 return -1;
2340 2340 }
2341 2341
2342 2342 if (stop != length - 1) {
2343 2343 PyErr_SetString(PyExc_IndexError,
2344 2344 "revlog index deletion indices are invalid");
2345 2345 return -1;
2346 2346 }
2347 2347
2348 2348 if (start < self->length) {
2349 2349 if (self->ntinitialized) {
2350 2350 Py_ssize_t i;
2351 2351
2352 2352 for (i = start + 1; i < self->length; i++) {
2353 2353 const char *node = index_node_existing(self, i);
2354 2354 if (node == NULL)
2355 2355 return -1;
2356 2356
2357 2357 nt_delete_node(&self->nt, node);
2358 2358 }
2359 2359 if (self->added)
2360 2360 index_invalidate_added(self, 0);
2361 2361 if (self->ntrev > start)
2362 2362 self->ntrev = (int)start;
2363 2363 }
2364 2364 self->length = start;
2365 2365 if (start < self->raw_length) {
2366 2366 if (self->cache) {
2367 2367 Py_ssize_t i;
2368 2368 for (i = start; i < self->raw_length; i++)
2369 2369 Py_CLEAR(self->cache[i]);
2370 2370 }
2371 2371 self->raw_length = start;
2372 2372 }
2373 2373 goto done;
2374 2374 }
2375 2375
2376 2376 if (self->ntinitialized) {
2377 2377 index_invalidate_added(self, start - self->length);
2378 2378 if (self->ntrev > start)
2379 2379 self->ntrev = (int)start;
2380 2380 }
2381 2381 if (self->added)
2382 2382 ret = PyList_SetSlice(self->added, start - self->length,
2383 2383 PyList_GET_SIZE(self->added), NULL);
2384 2384 done:
2385 2385 Py_CLEAR(self->headrevs);
2386 2386 return ret;
2387 2387 }
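Only tail deletion with step 1 survives these checks, i.e. the Python-level del idx[start:]. A hedged C equivalent (`idx` is assumed to be a parsers.index instance):

#include <Python.h>

static int strip_demo(PyObject *idx, Py_ssize_t start)
{
	PyObject *startobj = PyLong_FromSsize_t(start);
	PyObject *slice;
	int ret;

	if (startobj == NULL)
		return -1;
	slice = PySlice_New(startobj, NULL, NULL); /* idx[start:] */
	Py_DECREF(startobj);
	if (slice == NULL)
		return -1;
	ret = PyObject_DelItem(idx, slice); /* del idx[start:] */
	Py_DECREF(slice);
	return ret;
}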
2388 2388
2389 2389 /*
2390 2390 * Supported ops:
2391 2391 *
2392 2392 * slice deletion
2393 2393 * string assignment (extend node->rev mapping)
2394 2394 * string deletion (shrink node->rev mapping)
2395 2395 */
2396 2396 static int index_assign_subscript(indexObject *self, PyObject *item,
2397 2397 PyObject *value)
2398 2398 {
2399 2399 char *node;
2400 2400 long rev;
2401 2401
2402 2402 if (PySlice_Check(item) && value == NULL)
2403 2403 return index_slice_del(self, item);
2404 2404
2405 2405 if (node_check(item, &node) == -1)
2406 2406 return -1;
2407 2407
2408 2408 if (value == NULL)
2409 2409 return self->ntinitialized ? nt_delete_node(&self->nt, node)
2410 2410 : 0;
2411 2411 rev = PyInt_AsLong(value);
2412 2412 if (rev > INT_MAX || rev < 0) {
2413 2413 if (!PyErr_Occurred())
2414 2414 PyErr_SetString(PyExc_ValueError, "rev out of range");
2415 2415 return -1;
2416 2416 }
2417 2417
2418 2418 if (index_init_nt(self) == -1)
2419 2419 return -1;
2420 2420 return nt_insert(&self->nt, node, (int)rev);
2421 2421 }
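The node-keyed operations map onto ordinary item syntax. A hedged sketch of both; `node20` is assumed to point at a 20-byte binary node id:

#include <Python.h>

static int nodemap_demo(PyObject *idx, const char *node20)
{
	PyObject *key = PyBytes_FromStringAndSize(node20, 20);
	PyObject *rev = PyLong_FromLong(5);
	int ret = -1;

	if (key && rev &&
	    PyObject_SetItem(idx, key, rev) == 0 && /* idx[node] = 5 */
	    PyObject_DelItem(idx, key) == 0)        /* del idx[node] */
		ret = 0;
	Py_XDECREF(key);
	Py_XDECREF(rev);
	return ret;
}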
2422 2422
2423 2423 /*
2424 2424 * Find all RevlogNG entries in an index that has inline data. Update
2425 2425 * the optional "offsets" table with those entries.
2426 2426 */
2427 2427 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
2428 2428 {
2429 2429 const char *data = (const char *)self->buf.buf;
2430 2430 Py_ssize_t pos = 0;
2431 2431 Py_ssize_t end = self->buf.len;
2432 2432 long incr = v1_hdrsize;
2433 2433 Py_ssize_t len = 0;
2434 2434
2435 2435 while (pos + v1_hdrsize <= end && pos >= 0) {
2436 2436 uint32_t comp_len;
2437 2437 /* 3rd element of header is length of compressed inline data */
2438 2438 comp_len = getbe32(data + pos + 8);
2439 2439 incr = v1_hdrsize + comp_len;
2440 2440 if (offsets)
2441 2441 offsets[len] = data + pos;
2442 2442 len++;
2443 2443 pos += incr;
2444 2444 }
2445 2445
2446 2446 if (pos != end) {
2447 2447 if (!PyErr_Occurred())
2448 2448 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2449 2449 return -1;
2450 2450 }
2451 2451
2452 2452 return len;
2453 2453 }
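The scan depends only on the v1 record layout: a fixed-size header whose big-endian 32-bit field at byte offset 8 gives the length of the compressed data stored inline after it. A minimal stride sketch; V1_HDRSIZE is an assumption standing in for this file's v1_hdrsize (64 bytes in RevlogNG v1):

#include <stddef.h>
#include <stdint.h>

#define V1_HDRSIZE 64 /* assumption: mirrors v1_hdrsize */

static uint32_t be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/* offset of the record following the one at `pos` */
static size_t next_record(const unsigned char *data, size_t pos)
{
	return pos + V1_HDRSIZE + be32(data + pos + 8);
}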
2454 2454
2455 2455 static int index_init(indexObject *self, PyObject *args)
2456 2456 {
2457 2457 PyObject *data_obj, *inlined_obj;
2458 2458 Py_ssize_t size;
2459 2459
2460 2460 /* Initialize before argument-checking to avoid index_dealloc() crash.
2461 2461 */
2462 2462 self->raw_length = 0;
2463 2463 self->added = NULL;
2464 2464 self->cache = NULL;
2465 2465 self->data = NULL;
2466 2466 memset(&self->buf, 0, sizeof(self->buf));
2467 2467 self->headrevs = NULL;
2468 2468 self->filteredrevs = Py_None;
2469 2469 Py_INCREF(Py_None);
2470 2470 self->ntinitialized = 0;
2471 2471 self->offsets = NULL;
2472 2472
2473 2473 if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
2474 2474 return -1;
2475 2475 if (!PyObject_CheckBuffer(data_obj)) {
2476 2476 PyErr_SetString(PyExc_TypeError,
2477 2477 "data does not support buffer interface");
2478 2478 return -1;
2479 2479 }
2480 2480
2481 2481 if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
2482 2482 return -1;
2483 2483 size = self->buf.len;
2484 2484
2485 2485 self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
2486 2486 self->data = data_obj;
2487 2487
2488 2488 self->ntlookups = self->ntmisses = 0;
2489 2489 self->ntrev = -1;
2490 2490 Py_INCREF(self->data);
2491 2491
2492 2492 if (self->inlined) {
2493 2493 Py_ssize_t len = inline_scan(self, NULL);
2494 2494 if (len == -1)
2495 2495 goto bail;
2496 2496 self->raw_length = len;
2497 2497 self->length = len;
2498 2498 } else {
2499 2499 if (size % v1_hdrsize) {
2500 2500 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2501 2501 goto bail;
2502 2502 }
2503 2503 self->raw_length = size / v1_hdrsize;
2504 2504 self->length = self->raw_length;
2505 2505 }
2506 2506
2507 2507 return 0;
2508 2508 bail:
2509 2509 return -1;
2510 2510 }
2511 2511
2512 2512 static PyObject *index_nodemap(indexObject *self)
2513 2513 {
2514 2514 Py_INCREF(self);
2515 2515 return (PyObject *)self;
2516 2516 }
2517 2517
2518 2518 static void _index_clearcaches(indexObject *self)
2519 2519 {
2520 2520 if (self->cache) {
2521 2521 Py_ssize_t i;
2522 2522
2523 2523 for (i = 0; i < self->raw_length; i++)
2524 2524 Py_CLEAR(self->cache[i]);
2525 2525 free(self->cache);
2526 2526 self->cache = NULL;
2527 2527 }
2528 2528 if (self->offsets) {
2529 2529 PyMem_Free((void *)self->offsets);
2530 2530 self->offsets = NULL;
2531 2531 }
2532 2532 if (self->ntinitialized) {
2533 2533 nt_dealloc(&self->nt);
2534 2534 }
2535 2535 self->ntinitialized = 0;
2536 2536 Py_CLEAR(self->headrevs);
2537 2537 }
2538 2538
2539 2539 static PyObject *index_clearcaches(indexObject *self)
2540 2540 {
2541 2541 _index_clearcaches(self);
2542 2542 self->ntrev = -1;
2543 2543 self->ntlookups = self->ntmisses = 0;
2544 2544 Py_RETURN_NONE;
2545 2545 }
2546 2546
2547 2547 static void index_dealloc(indexObject *self)
2548 2548 {
2549 2549 _index_clearcaches(self);
2550 2550 Py_XDECREF(self->filteredrevs);
2551 2551 if (self->buf.buf) {
2552 2552 PyBuffer_Release(&self->buf);
2553 2553 memset(&self->buf, 0, sizeof(self->buf));
2554 2554 }
2555 2555 Py_XDECREF(self->data);
2556 2556 Py_XDECREF(self->added);
2557 2557 PyObject_Del(self);
2558 2558 }
2559 2559
2560 2560 static PySequenceMethods index_sequence_methods = {
2561 2561 (lenfunc)index_length, /* sq_length */
2562 2562 0, /* sq_concat */
2563 2563 0, /* sq_repeat */
2564 2564 (ssizeargfunc)index_get, /* sq_item */
2565 2565 0, /* sq_slice */
2566 2566 0, /* sq_ass_item */
2567 2567 0, /* sq_ass_slice */
2568 2568 (objobjproc)index_contains, /* sq_contains */
2569 2569 };
2570 2570
2571 2571 static PyMappingMethods index_mapping_methods = {
2572 2572 (lenfunc)index_length, /* mp_length */
2573 2573 (binaryfunc)index_getitem, /* mp_subscript */
2574 2574 (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
2575 2575 };
2576 2576
2577 2577 static PyMethodDef index_methods[] = {
2578 2578 {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
2579 2579 "return the gca set of the given revs"},
2580 2580 {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
2581 2581 METH_VARARGS,
2582 2582 "return the heads of the common ancestors of the given revs"},
2583 2583 {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
2584 2584 "clear the index caches"},
2585 2585 {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
2586 2586 {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
2587 2587 "compute phases"},
2588 2588 {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
2589 2589 "reachableroots"},
2590 2590 {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
2591 2591 "get head revisions"}, /* Can do filtering since 3.2 */
2592 2592 {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
2593 2593 "get filtered head revisions"}, /* Can always do filtering */
2594 2594 {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
2595 2595 "determine revisions with deltas to reconstruct fulltext"},
2596 2596 {"slicechunktodensity", (PyCFunction)index_slicechunktodensity,
2597 2597 METH_VARARGS, "slice a chunk of revisions to match a density target"},
2598 2598 {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
2599 2599 {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
2600 2600 "match a potentially ambiguous node ID"},
2601 2601 {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
2602 2602 "find length of shortest hex nodeid of a binary ID"},
2603 2603 {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
2604 2604 {NULL} /* Sentinel */
2605 2605 };
2606 2606
2607 2607 static PyGetSetDef index_getset[] = {
2608 2608 {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
2609 2609 {NULL} /* Sentinel */
2610 2610 };
2611 2611
2612 2612 PyTypeObject HgRevlogIndex_Type = {
2613 2613 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2614 2614 "parsers.index", /* tp_name */
2615 2615 sizeof(indexObject), /* tp_basicsize */
2616 2616 0, /* tp_itemsize */
2617 2617 (destructor)index_dealloc, /* tp_dealloc */
2618 2618 0, /* tp_print */
2619 2619 0, /* tp_getattr */
2620 2620 0, /* tp_setattr */
2621 2621 0, /* tp_compare */
2622 2622 0, /* tp_repr */
2623 2623 0, /* tp_as_number */
2624 2624 &index_sequence_methods, /* tp_as_sequence */
2625 2625 &index_mapping_methods, /* tp_as_mapping */
2626 2626 0, /* tp_hash */
2627 2627 0, /* tp_call */
2628 2628 0, /* tp_str */
2629 2629 0, /* tp_getattro */
2630 2630 0, /* tp_setattro */
2631 2631 0, /* tp_as_buffer */
2632 2632 Py_TPFLAGS_DEFAULT, /* tp_flags */
2633 2633 "revlog index", /* tp_doc */
2634 2634 0, /* tp_traverse */
2635 2635 0, /* tp_clear */
2636 2636 0, /* tp_richcompare */
2637 2637 0, /* tp_weaklistoffset */
2638 2638 0, /* tp_iter */
2639 2639 0, /* tp_iternext */
2640 2640 index_methods, /* tp_methods */
2641 2641 0, /* tp_members */
2642 2642 index_getset, /* tp_getset */
2643 2643 0, /* tp_base */
2644 2644 0, /* tp_dict */
2645 2645 0, /* tp_descr_get */
2646 2646 0, /* tp_descr_set */
2647 2647 0, /* tp_dictoffset */
2648 2648 (initproc)index_init, /* tp_init */
2649 2649 0, /* tp_alloc */
2650 2650 };
2651 2651
2652 2652 /*
2653 2653 * returns a tuple of the form (index, cache) with elements as
2654 2654 * follows:
2655 2655 *
2656 2656 * index: an index object that lazily parses RevlogNG records
2657 2657 * cache: if data is inlined, a tuple (0, index_file_content), else None
2658 2658 * index_file_content could be a string, or a buffer
2659 2659 *
2660 2660 * added complications are for backwards compatibility
2661 2661 */
2662 2662 PyObject *parse_index2(PyObject *self, PyObject *args)
2663 2663 {
2664 2664 PyObject *tuple = NULL, *cache = NULL;
2665 2665 indexObject *idx;
2666 2666 int ret;
2667 2667
2668 2668 idx = PyObject_New(indexObject, &HgRevlogIndex_Type);
2669 2669 if (idx == NULL)
2670 2670 goto bail;
2671 2671
2672 2672 ret = index_init(idx, args);
2673 2673 if (ret == -1)
2674 2674 goto bail;
2675 2675
2676 2676 if (idx->inlined) {
2677 2677 cache = Py_BuildValue("iO", 0, idx->data);
2678 2678 if (cache == NULL)
2679 2679 goto bail;
2680 2680 } else {
2681 2681 cache = Py_None;
2682 2682 Py_INCREF(cache);
2683 2683 }
2684 2684
2685 2685 tuple = Py_BuildValue("NN", idx, cache);
2686 2686 if (!tuple)
2687 2687 goto bail;
2688 2688 return tuple;
2689 2689
2690 2690 bail:
2691 2691 Py_XDECREF(idx);
2692 2692 Py_XDECREF(cache);
2693 2693 Py_XDECREF(tuple);
2694 2694 return NULL;
2695 2695 }
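From the embedding side the entry point is the module-level parse_index2. A hedged sketch; the module path mercurial.cext.parsers is an assumption, and `data` is a bytes object holding the raw index contents:

#include <Python.h>

static PyObject *load_index_demo(PyObject *data, int inlined)
{
	PyObject *mod, *res;

	mod = PyImport_ImportModule("mercurial.cext.parsers"); /* assumed */
	if (mod == NULL)
		return NULL;
	/* returns (index, cache); cache is (0, data) when inlined,
	 * else None */
	res = PyObject_CallMethod(mod, "parse_index2", "Oi", data, inlined);
	Py_DECREF(mod);
	return res;
}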
2696 2696
2697 2697 #ifdef WITH_RUST
2698 2698
2699 2699 /* rustlazyancestors: iteration over ancestors implemented in Rust
2700 2700 *
2701 2701 * This class holds a reference to an index and to the Rust iterator.
2702 2702 */
2703 2703 typedef struct rustlazyancestorsObjectStruct rustlazyancestorsObject;
2704 2704
2705 2705 struct rustlazyancestorsObjectStruct {
2706 2706 PyObject_HEAD
2707 2707 /* Type-specific fields go here. */
2708 2708 indexObject *index; /* Ref kept to avoid GC'ing the index */
2709 2709 void *iter; /* Rust iterator */
2710 2710 };
2711 2711
2712 2712 /* FFI exposed from Rust code */
2713 rustlazyancestorsObject *
2714 rustlazyancestors_init(indexObject *index,
2715 /* to pass index_get_parents() */
2716 int (*)(indexObject *, Py_ssize_t, int *, int),
2717 /* intrevs vector */
2718 Py_ssize_t initrevslen, long *initrevs, long stoprev,
2719 int inclusive);
2713 rustlazyancestorsObject *rustlazyancestors_init(indexObject *index,
2714 /* intrevs vector */
2715 Py_ssize_t initrevslen,
2716 long *initrevs, long stoprev,
2717 int inclusive);
2720 2718 void rustlazyancestors_drop(rustlazyancestorsObject *self);
2721 2719 int rustlazyancestors_next(rustlazyancestorsObject *self);
2722 2720 int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev);
2723 2721
2724 static int index_get_parents_checked(indexObject *self, Py_ssize_t rev, int *ps,
2725 int maxrev)
2726 {
2727 if (rev < 0 || rev >= index_length(self)) {
2728 PyErr_SetString(PyExc_ValueError, "rev out of range");
2729 return -1;
2730 }
2731 return index_get_parents(self, rev, ps, maxrev);
2732 }
2733
2734 2722 /* CPython instance methods */
2735 2723 static int rustla_init(rustlazyancestorsObject *self, PyObject *args)
2736 2724 {
2737 2725 PyObject *initrevsarg = NULL;
2738 2726 PyObject *inclusivearg = NULL;
2739 2727 long stoprev = 0;
2740 2728 long *initrevs = NULL;
2741 2729 int inclusive = 0;
2742 2730 Py_ssize_t i;
2743 2731
2744 2732 indexObject *index;
2745 2733 if (!PyArg_ParseTuple(args, "O!O!lO!", &HgRevlogIndex_Type, &index,
2746 2734 &PyList_Type, &initrevsarg, &stoprev,
2747 2735 &PyBool_Type, &inclusivearg))
2748 2736 return -1;
2749 2737
2750 2738 Py_INCREF(index);
2751 2739 self->index = index;
2752 2740
2753 2741 if (inclusivearg == Py_True)
2754 2742 inclusive = 1;
2755 2743
2756 2744 Py_ssize_t linit = PyList_GET_SIZE(initrevsarg);
2757 2745
2758 2746 initrevs = (long *)calloc(linit, sizeof(long));
2759 2747
2760 2748 if (initrevs == NULL) {
2761 2749 PyErr_NoMemory();
2762 2750 goto bail;
2763 2751 }
2764 2752
2765 2753 for (i = 0; i < linit; i++) {
2766 2754 initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i));
2767 2755 }
2768 2756 if (PyErr_Occurred())
2769 2757 goto bail;
2770 2758
2771 self->iter = rustlazyancestors_init(index, index_get_parents, linit,
2772 initrevs, stoprev, inclusive);
2759 self->iter =
2760 rustlazyancestors_init(index, linit, initrevs, stoprev, inclusive);
2773 2761 if (self->iter == NULL) {
2774 2762 /* if this is because of GraphError::ParentOutOfRange
2775 * index_get_parents_checked() has already set the proper
2776 * ValueError */
2763 * HgRevlogIndex_GetParents() has already set the proper
2764 * exception */
2777 2765 goto bail;
2778 2766 }
2779 2767
2780 2768 free(initrevs);
2781 2769 return 0;
2782 2770
2783 2771 bail:
2784 2772 free(initrevs);
2785 2773 return -1;
2786 2774 };
2787 2775
2788 2776 static void rustla_dealloc(rustlazyancestorsObject *self)
2789 2777 {
2790 2778 Py_XDECREF(self->index);
2791 2779 if (self->iter != NULL) { /* can happen if rustla_init failed */
2792 2780 rustlazyancestors_drop(self->iter);
2793 2781 }
2794 2782 PyObject_Del(self);
2795 2783 }
2796 2784
2797 2785 static PyObject *rustla_next(rustlazyancestorsObject *self)
2798 2786 {
2799 2787 int res = rustlazyancestors_next(self->iter);
2800 2788 if (res == -1) {
2801 2789 /* Setting an explicit exception seems unnecessary,
2802 2790 * as examples from the Python source code (Objects/rangeobject.c
2803 2791 * and Modules/_io/stringio.c) demonstrate.
2804 2792 */
2805 2793 return NULL;
2806 2794 }
2807 2795 return PyInt_FromLong(res);
2808 2796 }
2809 2797
2810 2798 static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev)
2811 2799 {
2812 2800 long lrev;
2813 2801 if (!pylong_to_long(rev, &lrev)) {
2814 2802 PyErr_Clear();
2815 2803 return 0;
2816 2804 }
2817 2805 return rustlazyancestors_contains(self->iter, lrev);
2818 2806 }
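Putting the wrapper together: construction takes (index, initrevs, stoprev, inclusive), iteration goes through tp_iternext, and membership testing through sq_contains. A hedged embedding sketch; `mod` is assumed to be the parsers module and `idx` a parsers.index instance:

#include <Python.h>

static void rustla_demo(PyObject *mod, PyObject *idx)
{
	PyObject *cls = PyObject_GetAttrString(mod, "rustlazyancestors");
	PyObject *revs = Py_BuildValue("[ii]", 11, 13);
	PyObject *it = NULL, *rev;

	if (cls && revs)
		it = PyObject_CallFunction(cls, "OOlO", idx, revs, 0L,
		                           Py_True);
	if (it) {
		while ((rev = PyIter_Next(it)) != NULL) /* ancestor revs */
			Py_DECREF(rev);
		/* membership: `rev in it`, routed to sq_contains above */
		PySequence_Contains(it, PyList_GET_ITEM(revs, 0));
	}
	Py_XDECREF(it);
	Py_XDECREF(revs);
	Py_XDECREF(cls);
}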
2819 2807
2820 2808 static PySequenceMethods rustla_sequence_methods = {
2821 2809 0, /* sq_length */
2822 2810 0, /* sq_concat */
2823 2811 0, /* sq_repeat */
2824 2812 0, /* sq_item */
2825 2813 0, /* sq_slice */
2826 2814 0, /* sq_ass_item */
2827 2815 0, /* sq_ass_slice */
2828 2816 (objobjproc)rustla_contains, /* sq_contains */
2829 2817 };
2830 2818
2831 2819 static PyTypeObject rustlazyancestorsType = {
2832 2820 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2833 2821 "parsers.rustlazyancestors", /* tp_name */
2834 2822 sizeof(rustlazyancestorsObject), /* tp_basicsize */
2835 2823 0, /* tp_itemsize */
2836 2824 (destructor)rustla_dealloc, /* tp_dealloc */
2837 2825 0, /* tp_print */
2838 2826 0, /* tp_getattr */
2839 2827 0, /* tp_setattr */
2840 2828 0, /* tp_compare */
2841 2829 0, /* tp_repr */
2842 2830 0, /* tp_as_number */
2843 2831 &rustla_sequence_methods, /* tp_as_sequence */
2844 2832 0, /* tp_as_mapping */
2845 2833 0, /* tp_hash */
2846 2834 0, /* tp_call */
2847 2835 0, /* tp_str */
2848 2836 0, /* tp_getattro */
2849 2837 0, /* tp_setattro */
2850 2838 0, /* tp_as_buffer */
2851 2839 Py_TPFLAGS_DEFAULT, /* tp_flags */
2852 2840 "Iterator over ancestors, implemented in Rust", /* tp_doc */
2853 2841 0, /* tp_traverse */
2854 2842 0, /* tp_clear */
2855 2843 0, /* tp_richcompare */
2856 2844 0, /* tp_weaklistoffset */
2857 2845 0, /* tp_iter */
2858 2846 (iternextfunc)rustla_next, /* tp_iternext */
2859 2847 0, /* tp_methods */
2860 2848 0, /* tp_members */
2861 2849 0, /* tp_getset */
2862 2850 0, /* tp_base */
2863 2851 0, /* tp_dict */
2864 2852 0, /* tp_descr_get */
2865 2853 0, /* tp_descr_set */
2866 2854 0, /* tp_dictoffset */
2867 2855 (initproc)rustla_init, /* tp_init */
2868 2856 0, /* tp_alloc */
2869 2857 };
2870 2858 #endif /* WITH_RUST */
2871 2859
2872 2860 void revlog_module_init(PyObject *mod)
2873 2861 {
2874 2862 HgRevlogIndex_Type.tp_new = PyType_GenericNew;
2875 2863 if (PyType_Ready(&HgRevlogIndex_Type) < 0)
2876 2864 return;
2877 2865 Py_INCREF(&HgRevlogIndex_Type);
2878 2866 PyModule_AddObject(mod, "index", (PyObject *)&HgRevlogIndex_Type);
2879 2867
2880 2868 nodetreeType.tp_new = PyType_GenericNew;
2881 2869 if (PyType_Ready(&nodetreeType) < 0)
2882 2870 return;
2883 2871 Py_INCREF(&nodetreeType);
2884 2872 PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);
2885 2873
2886 2874 if (!nullentry) {
2887 2875 nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0,
2888 2876 0, -1, -1, -1, -1, nullid, 20);
2889 2877 }
2890 2878 if (nullentry)
2891 2879 PyObject_GC_UnTrack(nullentry);
2892 2880
2893 2881 #ifdef WITH_RUST
2894 2882 rustlazyancestorsType.tp_new = PyType_GenericNew;
2895 2883 if (PyType_Ready(&rustlazyancestorsType) < 0)
2896 2884 return;
2897 2885 Py_INCREF(&rustlazyancestorsType);
2898 2886 PyModule_AddObject(mod, "rustlazyancestors",
2899 2887 (PyObject *)&rustlazyancestorsType);
2900 2888 #endif
2901 2889 }
@@ -1,265 +1,267 b''
1 1 // Copyright 2018 Georges Racinet <gracinet@anybox.fr>
2 2 //
3 3 // This software may be used and distributed according to the terms of the
4 4 // GNU General Public License version 2 or any later version.
5 5
6 6 //! Bindings for CPython extension code
7 7 //!
8 8 //! This exposes methods to build and use a `rustlazyancestors` iterator
9 9 //! from C code, using an index passed from the caller at instantiation;
10 10 //! the parents function is now resolved from the symbol table.
11 11
12 12 use hg::AncestorsIterator;
13 13 use hg::{Graph, GraphError, Revision, NULL_REVISION};
14 14 use libc::{c_int, c_long, c_void, ssize_t};
15 15 use std::ptr::null_mut;
16 16 use std::slice;
17 17
18 18 type IndexPtr = *mut c_void;
19 type IndexParentsFn =
20 unsafe extern "C" fn(index: IndexPtr, rev: ssize_t, ps: *mut [c_int; 2], max_rev: c_int)
21 -> c_int;
19
20 extern "C" {
21 fn HgRevlogIndex_GetParents(
22 op: IndexPtr,
23 rev: c_int,
24 parents: *mut [c_int; 2],
25 ) -> c_int;
26 }
22 27
23 28 /// A Graph backed by objects and functions from revlog.c
24 29 ///
25 30 /// This implementation of the Graph trait relies on (a pointer to)
26 31 /// - the C index object (`index` member)
27 32 /// - the C symbol `HgRevlogIndex_GetParents()`, resolved at link time
28 33 pub struct Index {
29 34 index: IndexPtr,
30 parents: IndexParentsFn,
31 35 }
32 36
33 37 impl Index {
34 pub fn new(index: IndexPtr, parents: IndexParentsFn) -> Self {
38 pub fn new(index: IndexPtr) -> Self {
35 39 Index {
36 40 index: index,
37 parents: parents,
38 41 }
39 42 }
40 43 }
41 44
42 45 impl Graph for Index {
43 46 /// wrap a call to the C extern parents function
44 47 fn parents(&self, rev: Revision) -> Result<(Revision, Revision), GraphError> {
45 48 let mut res: [c_int; 2] = [0; 2];
46 49 let code =
47 unsafe { (self.parents)(self.index, rev as ssize_t, &mut res as *mut [c_int; 2], rev) };
50 unsafe { HgRevlogIndex_GetParents(self.index, rev, &mut res as *mut [c_int; 2]) };
48 51 match code {
49 52 0 => Ok((res[0], res[1])),
50 53 _ => Err(GraphError::ParentOutOfRange(rev)),
51 54 }
52 55 }
53 56 }
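The extern block binds to a symbol exported by the C extension rather than to a function pointer handed over at runtime; its expected C-side shape is roughly the following (a sketch, the authoritative declaration lives on the C side):

/* sketch of the C-side declaration the Rust extern block assumes */
int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps);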
54 57
55 58 /// Wrapping of AncestorsIterator<Index> constructor, for C callers.
56 59 ///
57 60 /// Besides `initrevs`, `stoprev` and `inclusive`, which are converted,
58 61 /// we receive the index as a pointer
59 62 #[no_mangle]
60 63 pub extern "C" fn rustlazyancestors_init(
61 64 index: IndexPtr,
62 parents: IndexParentsFn,
63 65 initrevslen: ssize_t,
64 66 initrevs: *mut c_long,
65 67 stoprev: c_long,
66 68 inclusive: c_int,
67 69 ) -> *mut AncestorsIterator<Index> {
68 70 assert!(initrevslen >= 0);
69 71 unsafe {
70 72 raw_init(
71 Index::new(index, parents),
73 Index::new(index),
72 74 initrevslen as usize,
73 75 initrevs,
74 76 stoprev,
75 77 inclusive,
76 78 )
77 79 }
78 80 }
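For a C caller the visible effect of this change is one argument fewer: the parents callback disappears from the constructor. A hedged fragment against the new signature; `idx` is assumed to be a valid index object:

/* old: rustlazyancestors_init(idx, index_get_parents, 2, initrevs, 0, 1) */
long initrevs[2] = {11, 13};
rustlazyancestorsObject *it =
    rustlazyancestors_init(idx, 2, initrevs, 0 /* stoprev */,
                           1 /* inclusive */);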
79 81
80 82 /// Testable (for any Graph) version of rustlazyancestors_init
81 83 #[inline]
82 84 unsafe fn raw_init<G: Graph>(
83 85 graph: G,
84 86 initrevslen: usize,
85 87 initrevs: *mut c_long,
86 88 stoprev: c_long,
87 89 inclusive: c_int,
88 90 ) -> *mut AncestorsIterator<G> {
89 91 let inclb = match inclusive {
90 92 0 => false,
91 93 1 => true,
92 94 _ => {
93 95 return null_mut();
94 96 }
95 97 };
96 98
97 99 let slice = slice::from_raw_parts(initrevs, initrevslen);
98 100
99 101 Box::into_raw(Box::new(match AncestorsIterator::new(
100 102 graph,
101 103 slice.into_iter().map(|&r| r as Revision),
102 104 stoprev as Revision,
103 105 inclb,
104 106 ) {
105 107 Ok(it) => it,
106 108 Err(_) => {
107 109 return null_mut();
108 110 }
109 111 }))
110 112 }
111 113
112 114 /// Deallocator to be called from C code
113 115 #[no_mangle]
114 116 pub extern "C" fn rustlazyancestors_drop(raw_iter: *mut AncestorsIterator<Index>) {
115 117 raw_drop(raw_iter);
116 118 }
117 119
118 120 /// Testable (for any Graph) version of rustlazyancestors_drop
119 121 #[inline]
120 122 fn raw_drop<G: Graph>(raw_iter: *mut AncestorsIterator<G>) {
121 123 unsafe {
122 124 Box::from_raw(raw_iter);
123 125 }
124 126 }
125 127
126 128 /// Iteration main method to be called from C code
127 129 ///
128 130 /// We convert the end of iteration into NULL_REVISION;
129 131 /// it is up to the C wrapper to convert that back into a Python end of
130 132 /// iteration
131 133 #[no_mangle]
132 134 pub extern "C" fn rustlazyancestors_next(raw: *mut AncestorsIterator<Index>) -> c_long {
133 135 raw_next(raw)
134 136 }
135 137
136 138 /// Testable (for any Graph) version of rustlazyancestors_next
137 139 #[inline]
138 140 fn raw_next<G: Graph>(raw: *mut AncestorsIterator<G>) -> c_long {
139 141 let as_ref = unsafe { &mut *raw };
140 142 as_ref.next().unwrap_or(NULL_REVISION) as c_long
141 143 }
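Exhaustion is signalled in-band at this layer: NULL_REVISION (-1) comes back instead of a Python StopIteration, which the C wrapper recreates. A matching C consumption loop (fragment; `it` as built in the sketch above):

int rev;
while ((rev = rustlazyancestors_next(it)) != -1) /* -1 == NULL_REVISION */
	printf("ancestor: %d\n", rev);
rustlazyancestors_drop(it);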
142 144
143 145 #[no_mangle]
144 146 pub extern "C" fn rustlazyancestors_contains(
145 147 raw: *mut AncestorsIterator<Index>,
146 148 target: c_long,
147 149 ) -> c_int {
148 150 raw_contains(raw, target)
149 151 }
150 152
151 153 /// Testable (for any Graph) version of rustlazyancestors_contains
152 154 #[inline]
153 155 fn raw_contains<G: Graph>(
154 156 raw: *mut AncestorsIterator<G>,
155 157 target: c_long,
156 158 ) -> c_int {
157 159 let as_ref = unsafe { &mut *raw };
158 160 if as_ref.contains(target as Revision) {
159 161 return 1;
160 162 }
161 163 0
162 164 }
163 165
164 166 #[cfg(test)]
165 167 mod tests {
166 168 use super::*;
167 169 use std::thread;
168 170
169 171 #[derive(Clone, Debug)]
170 172 struct Stub;
171 173
172 174 impl Graph for Stub {
173 175 fn parents(&self, r: Revision) -> Result<(Revision, Revision), GraphError> {
174 176 match r {
175 177 25 => Err(GraphError::ParentOutOfRange(25)),
176 178 _ => Ok((1, 2)),
177 179 }
178 180 }
179 181 }
180 182
181 183 /// Helper for test_init_next()
182 184 fn stub_raw_init(
183 185 initrevslen: usize,
184 186 initrevs: usize,
185 187 stoprev: c_long,
186 188 inclusive: c_int,
187 189 ) -> usize {
188 190 unsafe {
189 191 raw_init(
190 192 Stub,
191 193 initrevslen,
192 194 initrevs as *mut c_long,
193 195 stoprev,
194 196 inclusive,
195 197 ) as usize
196 198 }
197 199 }
198 200
199 201 fn stub_raw_init_from_vec(
200 202 mut initrevs: Vec<c_long>,
201 203 stoprev: c_long,
202 204 inclusive: c_int,
203 205 ) -> *mut AncestorsIterator<Stub> {
204 206 unsafe {
205 207 raw_init(
206 208 Stub,
207 209 initrevs.len(),
208 210 initrevs.as_mut_ptr(),
209 211 stoprev,
210 212 inclusive,
211 213 )
212 214 }
213 215 }
214 216
215 217 #[test]
216 218 // Test what happens when we init an Iterator through the exposed C ABI
217 219 // and try to use it afterwards.
218 220 // We spawn new threads to make memory-consistency bugs more likely,
219 221 // which forces us to convert the pointers into shareable usizes.
220 222 fn test_init_next() {
221 223 let mut initrevs: Vec<c_long> = vec![11, 13];
222 224 let initrevs_len = initrevs.len();
223 225 let initrevs_ptr = initrevs.as_mut_ptr() as usize;
224 226 let handler = thread::spawn(move || stub_raw_init(initrevs_len, initrevs_ptr, 0, 1));
225 227 let raw = handler.join().unwrap() as *mut AncestorsIterator<Stub>;
226 228
227 229 assert_eq!(raw_next(raw), 13);
228 230 assert_eq!(raw_next(raw), 11);
229 231 assert_eq!(raw_next(raw), 2);
230 232 assert_eq!(raw_next(raw), 1);
231 233 assert_eq!(raw_next(raw), NULL_REVISION as c_long);
232 234 raw_drop(raw);
233 235 }
234 236
235 237 #[test]
236 238 fn test_init_wrong_bool() {
237 239 assert_eq!(stub_raw_init_from_vec(vec![11, 13], 0, 2), null_mut());
238 240 }
239 241
240 242 #[test]
241 243 fn test_empty() {
242 244 let raw = stub_raw_init_from_vec(vec![], 0, 1);
243 245 assert_eq!(raw_next(raw), NULL_REVISION as c_long);
244 246 raw_drop(raw);
245 247 }
246 248
247 249 #[test]
248 250 fn test_init_err_out_of_range() {
249 251 assert!(stub_raw_init_from_vec(vec![25], 0, 0).is_null());
250 252 }
251 253
252 254 #[test]
253 255 fn test_contains() {
254 256 let raw = stub_raw_init_from_vec(vec![5, 6], 0, 1);
255 257 assert_eq!(raw_contains(raw, 5), 1);
256 258 assert_eq!(raw_contains(raw, 2), 1);
257 259 }
258 260
259 261 #[test]
260 262 fn test_contains_exclusive() {
261 263 let raw = stub_raw_init_from_vec(vec![5, 6], 0, 0);
262 264 assert_eq!(raw_contains(raw, 5), 0);
263 265 assert_eq!(raw_contains(raw, 2), 1);
264 266 }
265 267 }