sparse-revlog: add a `index_segment_span` function in C...
Boris Feld
r40741:4ec6a240 default
@@ -1,2577 +1,2605 @@
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #include <Python.h>
11 11 #include <assert.h>
12 12 #include <ctype.h>
13 13 #include <limits.h>
14 14 #include <stddef.h>
15 15 #include <string.h>
16 16
17 17 #include "bitmanipulation.h"
18 18 #include "charencode.h"
19 19 #include "util.h"
20 20
21 21 #ifdef IS_PY3K
22 22 /* The mapping of Python types is meant to be temporary to get Python
23 23 * 3 to compile. We should remove this once Python 3 is fully
24 24 * supported and proper types are used in the extensions themselves. */
25 25 #define PyInt_Check PyLong_Check
26 26 #define PyInt_FromLong PyLong_FromLong
27 27 #define PyInt_FromSsize_t PyLong_FromSsize_t
28 28 #define PyInt_AsLong PyLong_AsLong
29 29 #endif
30 30
31 31 typedef struct indexObjectStruct indexObject;
32 32
33 33 typedef struct {
34 34 int children[16];
35 35 } nodetreenode;
36 36
37 37 /*
38 38 * A base-16 trie for fast node->rev mapping.
39 39 *
40 40 * Positive value is index of the next node in the trie
41 41 * Negative value is a leaf: -(rev + 2)
42 42 * Zero is empty
43 43 */
44 44 typedef struct {
45 45 indexObject *index;
46 46 nodetreenode *nodes;
47 47 unsigned length; /* # nodes in use */
48 48 unsigned capacity; /* # nodes allocated */
49 49 int depth; /* maximum depth of tree */
50 50 int splits; /* # splits performed */
51 51 } nodetree;
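
/* Illustrative sketch (editor addition, not part of upstream parsers.c):
 * the leaf encoding described above stores a revision as -(rev + 2), so 0
 * stays free as the "empty" marker and nullrev (-1) remains representable;
 * nt_delete_node() below also relies on rev == -2 encoding back to 0.
 * Hypothetical helpers showing the round trip: */
static inline int nt_example_encode_leaf(int rev)
{
	return -rev - 2; /* rev 0 -> -2, rev 1 -> -3, nullrev (-1) -> -1 */
}

static inline int nt_example_decode_leaf(int value)
{
	return -(value + 2); /* inverse of the encoding above */
}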
52 52
53 53 typedef struct {
54 54 PyObject_HEAD /* ; */
55 55 nodetree nt;
56 56 } nodetreeObject;
57 57
58 58 /*
59 59 * This class has two behaviors.
60 60 *
61 61 * When used in a list-like way (with integer keys), we decode an
62 62 * entry in a RevlogNG index file on demand. Our last entry is a
63 63 * sentinel, always a nullid. We have limited support for
64 64 * integer-keyed insert and delete, only at elements right before the
65 65 * sentinel.
66 66 *
67 67 * With string keys, we lazily perform a reverse mapping from node to
68 68 * rev, using a base-16 trie.
69 69 */
70 70 struct indexObjectStruct {
71 71 PyObject_HEAD
72 72 /* Type-specific fields go here. */
73 73 PyObject *data; /* raw bytes of index */
74 74 Py_buffer buf; /* buffer of data */
75 75 PyObject **cache; /* cached tuples */
76 76 const char **offsets; /* populated on demand */
77 77 Py_ssize_t raw_length; /* original number of elements */
78 78 Py_ssize_t length; /* current number of elements */
79 79 PyObject *added; /* populated on demand */
80 80 PyObject *headrevs; /* cache, invalidated on changes */
81 81 PyObject *filteredrevs; /* filtered revs set */
82 82 nodetree nt; /* base-16 trie */
83 83 int ntinitialized; /* 0 or 1 */
84 84 int ntrev; /* last rev scanned */
85 85 int ntlookups; /* # lookups */
86 86 int ntmisses; /* # lookups that miss the cache */
87 87 int inlined;
88 88 };
89 89
90 90 static Py_ssize_t index_length(const indexObject *self)
91 91 {
92 92 if (self->added == NULL)
93 93 return self->length;
94 94 return self->length + PyList_GET_SIZE(self->added);
95 95 }
96 96
97 97 static PyObject *nullentry = NULL;
98 98 static const char nullid[20] = {0};
99 99
100 100 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
101 101
102 102 #if LONG_MAX == 0x7fffffffL
103 103 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
104 104 #else
105 105 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
106 106 #endif
107 107
108 108 /* A RevlogNG v1 index entry is 64 bytes long. */
109 109 static const long v1_hdrsize = 64;
110 110
111 111 static void raise_revlog_error(void)
112 112 {
113 113 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
114 114
115 115 mod = PyImport_ImportModule("mercurial.error");
116 116 if (mod == NULL) {
117 117 goto cleanup;
118 118 }
119 119
120 120 dict = PyModule_GetDict(mod);
121 121 if (dict == NULL) {
122 122 goto cleanup;
123 123 }
124 124 Py_INCREF(dict);
125 125
126 126 errclass = PyDict_GetItemString(dict, "RevlogError");
127 127 if (errclass == NULL) {
128 128 PyErr_SetString(PyExc_SystemError,
129 129 "could not find RevlogError");
130 130 goto cleanup;
131 131 }
132 132
133 133 /* value of exception is ignored by callers */
134 134 PyErr_SetString(errclass, "RevlogError");
135 135
136 136 cleanup:
137 137 Py_XDECREF(dict);
138 138 Py_XDECREF(mod);
139 139 }
140 140
141 141 /*
142 142 * Return a pointer to the beginning of a RevlogNG record.
143 143 */
144 144 static const char *index_deref(indexObject *self, Py_ssize_t pos)
145 145 {
146 146 if (self->inlined && pos > 0) {
147 147 if (self->offsets == NULL) {
148 148 self->offsets = PyMem_Malloc(self->raw_length *
149 149 sizeof(*self->offsets));
150 150 if (self->offsets == NULL)
151 151 return (const char *)PyErr_NoMemory();
152 152 inline_scan(self, self->offsets);
153 153 }
154 154 return self->offsets[pos];
155 155 }
156 156
157 157 return (const char *)(self->buf.buf) + pos * v1_hdrsize;
158 158 }
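
/* Illustrative sketch (editor addition): in the non-inlined case above each
 * index record is exactly v1_hdrsize (64) bytes, so locating a record is
 * plain pointer arithmetic; only inlined revlogs need the lazily built
 * offsets table. A hypothetical equivalent of the non-inlined branch: */
static inline const char *example_record_ptr(const char *buf, Py_ssize_t pos)
{
	return buf + pos * v1_hdrsize; /* 64 bytes per on-disk entry */
}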
159 159
160 160 static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
161 161 int maxrev)
162 162 {
163 163 if (rev >= self->length) {
164 164 long tmp;
165 165 PyObject *tuple =
166 166 PyList_GET_ITEM(self->added, rev - self->length);
167 167 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 5), &tmp)) {
168 168 return -1;
169 169 }
170 170 ps[0] = (int)tmp;
171 171 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 6), &tmp)) {
172 172 return -1;
173 173 }
174 174 ps[1] = (int)tmp;
175 175 } else {
176 176 const char *data = index_deref(self, rev);
177 177 ps[0] = getbe32(data + 24);
178 178 ps[1] = getbe32(data + 28);
179 179 }
180 180 /* If the index file is corrupted, ps[] may point to invalid revisions,
181 181 * so trusting them unconditionally risks a buffer overflow. */
182 182 if (ps[0] > maxrev || ps[1] > maxrev) {
183 183 PyErr_SetString(PyExc_ValueError, "parent out of range");
184 184 return -1;
185 185 }
186 186 return 0;
187 187 }
188 188
189 189 static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
190 190 {
191 191 uint64_t offset;
192 192 if (rev >= self->length) {
193 193 PyObject *tuple;
194 194 PyObject *pylong;
195 195 PY_LONG_LONG tmp;
196 196 tuple = PyList_GET_ITEM(self->added, rev - self->length);
197 197 pylong = PyTuple_GET_ITEM(tuple, 0);
198 198 tmp = PyLong_AsLongLong(pylong);
199 199 if (tmp == -1 && PyErr_Occurred()) {
200 200 return -1;
201 201 }
202 202 if (tmp < 0) {
203 203 PyErr_Format(PyExc_OverflowError,
204 204 "revlog entry size out of bound (%lld)",
205 205 (long long)tmp);
206 206 return -1;
207 207 }
208 208 offset = (uint64_t)tmp;
209 209 } else {
210 210 const char *data = index_deref(self, rev);
211 211 offset = getbe32(data + 4);
212 212 if (rev == 0) {
213 213 /* mask out version number for the first entry */
214 214 offset &= 0xFFFF;
215 215 } else {
216 216 uint32_t offset_high = getbe32(data);
217 217 offset |= ((uint64_t)offset_high) << 32;
218 218 }
219 219 }
220 220 return (int64_t)(offset >> 16);
221 221 }
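
/* Illustrative sketch (editor addition): the first 8 bytes of an entry pack a
 * 48-bit byte offset into the data file together with 16 bits of flags, which
 * is why index_get_start() above shifts the combined field right by 16.
 * Hypothetical helpers splitting such a field: */
static inline int64_t example_offset_part(uint64_t offset_flags)
{
	return (int64_t)(offset_flags >> 16); /* drop the 16 flag bits */
}

static inline int example_flags_part(uint64_t offset_flags)
{
	return (int)(offset_flags & 0xFFFF); /* keep only the flag bits */
}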
222 222
223 223 static inline int index_get_length(indexObject *self, Py_ssize_t rev)
224 224 {
225 225 if (rev >= self->length) {
226 226 PyObject *tuple;
227 227 PyObject *pylong;
228 228 long ret;
229 229 tuple = PyList_GET_ITEM(self->added, rev - self->length);
230 230 pylong = PyTuple_GET_ITEM(tuple, 1);
231 231 ret = PyInt_AsLong(pylong);
232 232 if (ret == -1 && PyErr_Occurred()) {
233 233 return -1;
234 234 }
235 235 if (ret < 0 || ret > (long)INT_MAX) {
236 236 PyErr_Format(PyExc_OverflowError,
237 237 "revlog entry size out of bound (%ld)",
238 238 ret);
239 239 return -1;
240 240 }
241 241 return (int)ret;
242 242 } else {
243 243 const char *data = index_deref(self, rev);
244 244 return (int)getbe32(data + 8);
245 245 }
246 246 }
247 247
248 248 /*
249 249 * RevlogNG format (all in big endian, data may be inlined):
250 250 * 6 bytes: offset
251 251 * 2 bytes: flags
252 252 * 4 bytes: compressed length
253 253 * 4 bytes: uncompressed length
254 254 * 4 bytes: base revision
255 255 * 4 bytes: link revision
256 256 * 4 bytes: parent 1 revision
257 257 * 4 bytes: parent 2 revision
258 258 * 32 bytes: nodeid (only 20 bytes used)
259 259 */
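/* Illustrative sketch (editor addition, not upstream code): the 64-byte
 * layout above expressed as a struct. The real parser reads fields with
 * getbe32() from raw big-endian bytes rather than casting, so this is only
 * a visual aid for the offsets used in index_get() below. */
typedef struct {
	uint64_t offset_flags; /* 6-byte offset, then 2 bytes of flags */
	uint32_t comp_len;     /* compressed length */
	uint32_t uncomp_len;   /* uncompressed length */
	uint32_t base_rev;     /* delta base revision */
	uint32_t link_rev;     /* link revision */
	uint32_t parent_1;     /* first parent revision */
	uint32_t parent_2;     /* second parent revision */
	char node_id[32];      /* nodeid, only 20 bytes used */
} example_revlogng_entry;      /* 64 bytes when packed on disk */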
260 260 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
261 261 {
262 262 uint64_t offset_flags;
263 263 int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
264 264 const char *c_node_id;
265 265 const char *data;
266 266 Py_ssize_t length = index_length(self);
267 267 PyObject *entry;
268 268
269 269 if (pos == -1) {
270 270 Py_INCREF(nullentry);
271 271 return nullentry;
272 272 }
273 273
274 274 if (pos < 0 || pos >= length) {
275 275 PyErr_SetString(PyExc_IndexError, "revlog index out of range");
276 276 return NULL;
277 277 }
278 278
279 279 if (pos >= self->length) {
280 280 PyObject *obj;
281 281 obj = PyList_GET_ITEM(self->added, pos - self->length);
282 282 Py_INCREF(obj);
283 283 return obj;
284 284 }
285 285
286 286 if (self->cache) {
287 287 if (self->cache[pos]) {
288 288 Py_INCREF(self->cache[pos]);
289 289 return self->cache[pos];
290 290 }
291 291 } else {
292 292 self->cache = calloc(self->raw_length, sizeof(PyObject *));
293 293 if (self->cache == NULL)
294 294 return PyErr_NoMemory();
295 295 }
296 296
297 297 data = index_deref(self, pos);
298 298 if (data == NULL)
299 299 return NULL;
300 300
301 301 offset_flags = getbe32(data + 4);
302 302 if (pos == 0) /* mask out version number for the first entry */
303 303 offset_flags &= 0xFFFF;
304 304 else {
305 305 uint32_t offset_high = getbe32(data);
306 306 offset_flags |= ((uint64_t)offset_high) << 32;
307 307 }
308 308
309 309 comp_len = getbe32(data + 8);
310 310 uncomp_len = getbe32(data + 12);
311 311 base_rev = getbe32(data + 16);
312 312 link_rev = getbe32(data + 20);
313 313 parent_1 = getbe32(data + 24);
314 314 parent_2 = getbe32(data + 28);
315 315 c_node_id = data + 32;
316 316
317 317 entry = Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
318 318 base_rev, link_rev, parent_1, parent_2, c_node_id,
319 319 20);
320 320
321 321 if (entry) {
322 322 PyObject_GC_UnTrack(entry);
323 323 Py_INCREF(entry);
324 324 }
325 325
326 326 self->cache[pos] = entry;
327 327
328 328 return entry;
329 329 }
330 330
331 331 /*
332 332 * Return the 20-byte SHA of the node corresponding to the given rev.
333 333 */
334 334 static const char *index_node(indexObject *self, Py_ssize_t pos)
335 335 {
336 336 Py_ssize_t length = index_length(self);
337 337 const char *data;
338 338
339 339 if (pos == -1)
340 340 return nullid;
341 341
342 342 if (pos >= length)
343 343 return NULL;
344 344
345 345 if (pos >= self->length) {
346 346 PyObject *tuple, *str;
347 347 tuple = PyList_GET_ITEM(self->added, pos - self->length);
348 348 str = PyTuple_GetItem(tuple, 7);
349 349 return str ? PyBytes_AS_STRING(str) : NULL;
350 350 }
351 351
352 352 data = index_deref(self, pos);
353 353 return data ? data + 32 : NULL;
354 354 }
355 355
356 356 /*
357 357 * Return the 20-byte SHA of the node corresponding to the given rev. The
358 358 * rev is assumed to exist. If not, an exception is set.
359 359 */
360 360 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
361 361 {
362 362 const char *node = index_node(self, pos);
363 363 if (node == NULL) {
364 364 PyErr_Format(PyExc_IndexError, "could not access rev %d",
365 365 (int)pos);
366 366 }
367 367 return node;
368 368 }
369 369
370 370 static int nt_insert(nodetree *self, const char *node, int rev);
371 371
372 372 static int node_check(PyObject *obj, char **node)
373 373 {
374 374 Py_ssize_t nodelen;
375 375 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
376 376 return -1;
377 377 if (nodelen == 20)
378 378 return 0;
379 379 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
380 380 return -1;
381 381 }
382 382
383 383 static PyObject *index_append(indexObject *self, PyObject *obj)
384 384 {
385 385 char *node;
386 386 Py_ssize_t len;
387 387
388 388 if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
389 389 PyErr_SetString(PyExc_TypeError, "8-tuple required");
390 390 return NULL;
391 391 }
392 392
393 393 if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
394 394 return NULL;
395 395
396 396 len = index_length(self);
397 397
398 398 if (self->added == NULL) {
399 399 self->added = PyList_New(0);
400 400 if (self->added == NULL)
401 401 return NULL;
402 402 }
403 403
404 404 if (PyList_Append(self->added, obj) == -1)
405 405 return NULL;
406 406
407 407 if (self->ntinitialized)
408 408 nt_insert(&self->nt, node, (int)len);
409 409
410 410 Py_CLEAR(self->headrevs);
411 411 Py_RETURN_NONE;
412 412 }
413 413
414 414 static PyObject *index_stats(indexObject *self)
415 415 {
416 416 PyObject *obj = PyDict_New();
417 417 PyObject *s = NULL;
418 418 PyObject *t = NULL;
419 419
420 420 if (obj == NULL)
421 421 return NULL;
422 422
423 423 #define istat(__n, __d) \
424 424 do { \
425 425 s = PyBytes_FromString(__d); \
426 426 t = PyInt_FromSsize_t(self->__n); \
427 427 if (!s || !t) \
428 428 goto bail; \
429 429 if (PyDict_SetItem(obj, s, t) == -1) \
430 430 goto bail; \
431 431 Py_CLEAR(s); \
432 432 Py_CLEAR(t); \
433 433 } while (0)
434 434
435 435 if (self->added) {
436 436 Py_ssize_t len = PyList_GET_SIZE(self->added);
437 437 s = PyBytes_FromString("index entries added");
438 438 t = PyInt_FromSsize_t(len);
439 439 if (!s || !t)
440 440 goto bail;
441 441 if (PyDict_SetItem(obj, s, t) == -1)
442 442 goto bail;
443 443 Py_CLEAR(s);
444 444 Py_CLEAR(t);
445 445 }
446 446
447 447 if (self->raw_length != self->length)
448 448 istat(raw_length, "revs on disk");
449 449 istat(length, "revs in memory");
450 450 istat(ntlookups, "node trie lookups");
451 451 istat(ntmisses, "node trie misses");
452 452 istat(ntrev, "node trie last rev scanned");
453 453 if (self->ntinitialized) {
454 454 istat(nt.capacity, "node trie capacity");
455 455 istat(nt.depth, "node trie depth");
456 456 istat(nt.length, "node trie count");
457 457 istat(nt.splits, "node trie splits");
458 458 }
459 459
460 460 #undef istat
461 461
462 462 return obj;
463 463
464 464 bail:
465 465 Py_XDECREF(obj);
466 466 Py_XDECREF(s);
467 467 Py_XDECREF(t);
468 468 return NULL;
469 469 }
470 470
471 471 /*
472 472 * When we cache a list, we want to be sure the caller can't mutate
473 473 * the cached copy.
474 474 */
475 475 static PyObject *list_copy(PyObject *list)
476 476 {
477 477 Py_ssize_t len = PyList_GET_SIZE(list);
478 478 PyObject *newlist = PyList_New(len);
479 479 Py_ssize_t i;
480 480
481 481 if (newlist == NULL)
482 482 return NULL;
483 483
484 484 for (i = 0; i < len; i++) {
485 485 PyObject *obj = PyList_GET_ITEM(list, i);
486 486 Py_INCREF(obj);
487 487 PyList_SET_ITEM(newlist, i, obj);
488 488 }
489 489
490 490 return newlist;
491 491 }
492 492
493 493 static int check_filter(PyObject *filter, Py_ssize_t arg)
494 494 {
495 495 if (filter) {
496 496 PyObject *arglist, *result;
497 497 int isfiltered;
498 498
499 499 arglist = Py_BuildValue("(n)", arg);
500 500 if (!arglist) {
501 501 return -1;
502 502 }
503 503
504 504 result = PyEval_CallObject(filter, arglist);
505 505 Py_DECREF(arglist);
506 506 if (!result) {
507 507 return -1;
508 508 }
509 509
510 510 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
511 511 * same as this function, so we can just return it directly. */
512 512 isfiltered = PyObject_IsTrue(result);
513 513 Py_DECREF(result);
514 514 return isfiltered;
515 515 } else {
516 516 return 0;
517 517 }
518 518 }
519 519
520 520 static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
521 521 Py_ssize_t marker, char *phases)
522 522 {
523 523 PyObject *iter = NULL;
524 524 PyObject *iter_item = NULL;
525 525 Py_ssize_t min_idx = index_length(self) + 2;
526 526 long iter_item_long;
527 527
528 528 if (PyList_GET_SIZE(list) != 0) {
529 529 iter = PyObject_GetIter(list);
530 530 if (iter == NULL)
531 531 return -2;
532 532 while ((iter_item = PyIter_Next(iter))) {
533 533 if (!pylong_to_long(iter_item, &iter_item_long)) {
534 534 Py_DECREF(iter_item);
535 535 return -2;
536 536 }
537 537 Py_DECREF(iter_item);
538 538 if (iter_item_long < min_idx)
539 539 min_idx = iter_item_long;
540 540 phases[iter_item_long] = (char)marker;
541 541 }
542 542 Py_DECREF(iter);
543 543 }
544 544
545 545 return min_idx;
546 546 }
547 547
548 548 static inline void set_phase_from_parents(char *phases, int parent_1,
549 549 int parent_2, Py_ssize_t i)
550 550 {
551 551 if (parent_1 >= 0 && phases[parent_1] > phases[i])
552 552 phases[i] = phases[parent_1];
553 553 if (parent_2 >= 0 && phases[parent_2] > phases[i])
554 554 phases[i] = phases[parent_2];
555 555 }
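
/* Illustrative sketch (editor addition): phases are ordered 0 (public) <
 * 1 (draft) < 2 (secret), so the propagation above simply keeps each
 * revision at least as "secret" as its most secret parent. A hypothetical
 * one-parent version of the same rule: */
static inline char example_child_phase(char parent_phase, char own_phase)
{
	return parent_phase > own_phase ? parent_phase : own_phase;
}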
556 556
557 557 static PyObject *reachableroots2(indexObject *self, PyObject *args)
558 558 {
559 559
560 560 /* Input */
561 561 long minroot;
562 562 PyObject *includepatharg = NULL;
563 563 int includepath = 0;
564 564 /* heads and roots are lists */
565 565 PyObject *heads = NULL;
566 566 PyObject *roots = NULL;
567 567 PyObject *reachable = NULL;
568 568
569 569 PyObject *val;
570 570 Py_ssize_t len = index_length(self);
571 571 long revnum;
572 572 Py_ssize_t k;
573 573 Py_ssize_t i;
574 574 Py_ssize_t l;
575 575 int r;
576 576 int parents[2];
577 577
578 578 /* Internal data structure:
579 579 * tovisit: array of length len+1 (all revs + nullrev), filled up to
580 580 * lentovisit
581 581 *
582 582 * revstates: array of length len+1 (all revs + nullrev) */
583 583 int *tovisit = NULL;
584 584 long lentovisit = 0;
585 585 enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
586 586 char *revstates = NULL;
587 587
588 588 /* Get arguments */
589 589 if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
590 590 &PyList_Type, &roots, &PyBool_Type,
591 591 &includepatharg))
592 592 goto bail;
593 593
594 594 if (includepatharg == Py_True)
595 595 includepath = 1;
596 596
597 597 /* Initialize return set */
598 598 reachable = PyList_New(0);
599 599 if (reachable == NULL)
600 600 goto bail;
601 601
602 602 /* Initialize internal data structures */
603 603 tovisit = (int *)malloc((len + 1) * sizeof(int));
604 604 if (tovisit == NULL) {
605 605 PyErr_NoMemory();
606 606 goto bail;
607 607 }
608 608
609 609 revstates = (char *)calloc(len + 1, 1);
610 610 if (revstates == NULL) {
611 611 PyErr_NoMemory();
612 612 goto bail;
613 613 }
614 614
615 615 l = PyList_GET_SIZE(roots);
616 616 for (i = 0; i < l; i++) {
617 617 revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
618 618 if (revnum == -1 && PyErr_Occurred())
619 619 goto bail;
620 620 /* If root is out of range, e.g. wdir(), it must be unreachable
621 621 * from heads. So we can just ignore it. */
622 622 if (revnum + 1 < 0 || revnum + 1 >= len + 1)
623 623 continue;
624 624 revstates[revnum + 1] |= RS_ROOT;
625 625 }
626 626
627 627 /* Populate tovisit with all the heads */
628 628 l = PyList_GET_SIZE(heads);
629 629 for (i = 0; i < l; i++) {
630 630 revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
631 631 if (revnum == -1 && PyErr_Occurred())
632 632 goto bail;
633 633 if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
634 634 PyErr_SetString(PyExc_IndexError, "head out of range");
635 635 goto bail;
636 636 }
637 637 if (!(revstates[revnum + 1] & RS_SEEN)) {
638 638 tovisit[lentovisit++] = (int)revnum;
639 639 revstates[revnum + 1] |= RS_SEEN;
640 640 }
641 641 }
642 642
643 643 /* Visit the tovisit list and find the reachable roots */
644 644 k = 0;
645 645 while (k < lentovisit) {
646 646 /* Add the node to reachable if it is a root */
647 647 revnum = tovisit[k++];
648 648 if (revstates[revnum + 1] & RS_ROOT) {
649 649 revstates[revnum + 1] |= RS_REACHABLE;
650 650 val = PyInt_FromLong(revnum);
651 651 if (val == NULL)
652 652 goto bail;
653 653 r = PyList_Append(reachable, val);
654 654 Py_DECREF(val);
655 655 if (r < 0)
656 656 goto bail;
657 657 if (includepath == 0)
658 658 continue;
659 659 }
660 660
661 661 /* Add its parents to the list of nodes to visit */
662 662 if (revnum == -1)
663 663 continue;
664 664 r = index_get_parents(self, revnum, parents, (int)len - 1);
665 665 if (r < 0)
666 666 goto bail;
667 667 for (i = 0; i < 2; i++) {
668 668 if (!(revstates[parents[i] + 1] & RS_SEEN) &&
669 669 parents[i] >= minroot) {
670 670 tovisit[lentovisit++] = parents[i];
671 671 revstates[parents[i] + 1] |= RS_SEEN;
672 672 }
673 673 }
674 674 }
675 675
676 676 /* Find all the nodes in between the roots we found and the heads
677 677 * and add them to the reachable set */
678 678 if (includepath == 1) {
679 679 long minidx = minroot;
680 680 if (minidx < 0)
681 681 minidx = 0;
682 682 for (i = minidx; i < len; i++) {
683 683 if (!(revstates[i + 1] & RS_SEEN))
684 684 continue;
685 685 r = index_get_parents(self, i, parents, (int)len - 1);
686 686 /* Corrupted index file, error is set from
687 687 * index_get_parents */
688 688 if (r < 0)
689 689 goto bail;
690 690 if (((revstates[parents[0] + 1] |
691 691 revstates[parents[1] + 1]) &
692 692 RS_REACHABLE) &&
693 693 !(revstates[i + 1] & RS_REACHABLE)) {
694 694 revstates[i + 1] |= RS_REACHABLE;
695 695 val = PyInt_FromSsize_t(i);
696 696 if (val == NULL)
697 697 goto bail;
698 698 r = PyList_Append(reachable, val);
699 699 Py_DECREF(val);
700 700 if (r < 0)
701 701 goto bail;
702 702 }
703 703 }
704 704 }
705 705
706 706 free(revstates);
707 707 free(tovisit);
708 708 return reachable;
709 709 bail:
710 710 Py_XDECREF(reachable);
711 711 free(revstates);
712 712 free(tovisit);
713 713 return NULL;
714 714 }
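
/* Illustrative sketch (editor addition): the revstates[] array above is
 * indexed by rev + 1 so that nullrev (-1) lands on slot 0 and every real
 * revision still fits in the remaining len slots. Hypothetical helper: */
static inline Py_ssize_t example_revstate_slot(long revnum)
{
	return (Py_ssize_t)(revnum + 1); /* nullrev -> 0, rev 0 -> 1, ... */
}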
715 715
716 716 static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
717 717 {
718 718 PyObject *roots = Py_None;
719 719 PyObject *ret = NULL;
720 720 PyObject *phasessize = NULL;
721 721 PyObject *phaseroots = NULL;
722 722 PyObject *phaseset = NULL;
723 723 PyObject *phasessetlist = NULL;
724 724 PyObject *rev = NULL;
725 725 Py_ssize_t len = index_length(self);
726 726 Py_ssize_t numphase = 0;
727 727 Py_ssize_t minrevallphases = 0;
728 728 Py_ssize_t minrevphase = 0;
729 729 Py_ssize_t i = 0;
730 730 char *phases = NULL;
731 731 long phase;
732 732
733 733 if (!PyArg_ParseTuple(args, "O", &roots))
734 734 goto done;
735 735 if (roots == NULL || !PyList_Check(roots)) {
736 736 PyErr_SetString(PyExc_TypeError, "roots must be a list");
737 737 goto done;
738 738 }
739 739
740 740 phases = calloc(
741 741 len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
742 742 if (phases == NULL) {
743 743 PyErr_NoMemory();
744 744 goto done;
745 745 }
746 746 /* Put the phase information of all the roots in phases */
747 747 numphase = PyList_GET_SIZE(roots) + 1;
748 748 minrevallphases = len + 1;
749 749 phasessetlist = PyList_New(numphase);
750 750 if (phasessetlist == NULL)
751 751 goto done;
752 752
753 753 PyList_SET_ITEM(phasessetlist, 0, Py_None);
754 754 Py_INCREF(Py_None);
755 755
756 756 for (i = 0; i < numphase - 1; i++) {
757 757 phaseroots = PyList_GET_ITEM(roots, i);
758 758 phaseset = PySet_New(NULL);
759 759 if (phaseset == NULL)
760 760 goto release;
761 761 PyList_SET_ITEM(phasessetlist, i + 1, phaseset);
762 762 if (!PyList_Check(phaseroots)) {
763 763 PyErr_SetString(PyExc_TypeError,
764 764 "roots item must be a list");
765 765 goto release;
766 766 }
767 767 minrevphase =
768 768 add_roots_get_min(self, phaseroots, i + 1, phases);
769 769 if (minrevphase == -2) /* Error from add_roots_get_min */
770 770 goto release;
771 771 minrevallphases = MIN(minrevallphases, minrevphase);
772 772 }
773 773 /* Propagate the phase information from the roots to the revs */
774 774 if (minrevallphases != -1) {
775 775 int parents[2];
776 776 for (i = minrevallphases; i < len; i++) {
777 777 if (index_get_parents(self, i, parents, (int)len - 1) <
778 778 0)
779 779 goto release;
780 780 set_phase_from_parents(phases, parents[0], parents[1],
781 781 i);
782 782 }
783 783 }
784 784 /* Transform phase list to a python list */
785 785 phasessize = PyInt_FromSsize_t(len);
786 786 if (phasessize == NULL)
787 787 goto release;
788 788 for (i = 0; i < len; i++) {
789 789 phase = phases[i];
790 790 /* We only store the revision sets for the non-public phases; the
791 791 * public set is computed as a difference */
792 792 if (phase != 0) {
793 793 phaseset = PyList_GET_ITEM(phasessetlist, phase);
794 794 rev = PyInt_FromSsize_t(i);
795 795 if (rev == NULL)
796 796 goto release;
797 797 PySet_Add(phaseset, rev);
798 798 Py_XDECREF(rev);
799 799 }
800 800 }
801 801 ret = PyTuple_Pack(2, phasessize, phasessetlist);
802 802
803 803 release:
804 804 Py_XDECREF(phasessize);
805 805 Py_XDECREF(phasessetlist);
806 806 done:
807 807 free(phases);
808 808 return ret;
809 809 }
810 810
811 811 static PyObject *index_headrevs(indexObject *self, PyObject *args)
812 812 {
813 813 Py_ssize_t i, j, len;
814 814 char *nothead = NULL;
815 815 PyObject *heads = NULL;
816 816 PyObject *filter = NULL;
817 817 PyObject *filteredrevs = Py_None;
818 818
819 819 if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
820 820 return NULL;
821 821 }
822 822
823 823 if (self->headrevs && filteredrevs == self->filteredrevs)
824 824 return list_copy(self->headrevs);
825 825
826 826 Py_DECREF(self->filteredrevs);
827 827 self->filteredrevs = filteredrevs;
828 828 Py_INCREF(filteredrevs);
829 829
830 830 if (filteredrevs != Py_None) {
831 831 filter = PyObject_GetAttrString(filteredrevs, "__contains__");
832 832 if (!filter) {
833 833 PyErr_SetString(
834 834 PyExc_TypeError,
835 835 "filteredrevs has no attribute __contains__");
836 836 goto bail;
837 837 }
838 838 }
839 839
840 840 len = index_length(self);
841 841 heads = PyList_New(0);
842 842 if (heads == NULL)
843 843 goto bail;
844 844 if (len == 0) {
845 845 PyObject *nullid = PyInt_FromLong(-1);
846 846 if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
847 847 Py_XDECREF(nullid);
848 848 goto bail;
849 849 }
850 850 goto done;
851 851 }
852 852
853 853 nothead = calloc(len, 1);
854 854 if (nothead == NULL) {
855 855 PyErr_NoMemory();
856 856 goto bail;
857 857 }
858 858
859 859 for (i = len - 1; i >= 0; i--) {
860 860 int isfiltered;
861 861 int parents[2];
862 862
863 863 /* If nothead[i] == 1, it means we've seen an unfiltered child
864 864 * of this node already, and therefore this node is not
865 865 * filtered. So we can skip the expensive check_filter step.
866 866 */
867 867 if (nothead[i] != 1) {
868 868 isfiltered = check_filter(filter, i);
869 869 if (isfiltered == -1) {
870 870 PyErr_SetString(PyExc_TypeError,
871 871 "unable to check filter");
872 872 goto bail;
873 873 }
874 874
875 875 if (isfiltered) {
876 876 nothead[i] = 1;
877 877 continue;
878 878 }
879 879 }
880 880
881 881 if (index_get_parents(self, i, parents, (int)len - 1) < 0)
882 882 goto bail;
883 883 for (j = 0; j < 2; j++) {
884 884 if (parents[j] >= 0)
885 885 nothead[parents[j]] = 1;
886 886 }
887 887 }
888 888
889 889 for (i = 0; i < len; i++) {
890 890 PyObject *head;
891 891
892 892 if (nothead[i])
893 893 continue;
894 894 head = PyInt_FromSsize_t(i);
895 895 if (head == NULL || PyList_Append(heads, head) == -1) {
896 896 Py_XDECREF(head);
897 897 goto bail;
898 898 }
899 899 }
900 900
901 901 done:
902 902 self->headrevs = heads;
903 903 Py_XDECREF(filter);
904 904 free(nothead);
905 905 return list_copy(self->headrevs);
906 906 bail:
907 907 Py_XDECREF(filter);
908 908 Py_XDECREF(heads);
909 909 free(nothead);
910 910 return NULL;
911 911 }
912 912
913 913 /**
914 914 * Obtain the base revision index entry.
915 915 *
916 916 * Callers must ensure that rev >= 0 or illegal memory access may occur.
917 917 */
918 918 static inline int index_baserev(indexObject *self, int rev)
919 919 {
920 920 const char *data;
921 921
922 922 if (rev >= self->length) {
923 923 PyObject *tuple =
924 924 PyList_GET_ITEM(self->added, rev - self->length);
925 925 long ret;
926 926 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 3), &ret)) {
927 927 return -2;
928 928 }
929 929 return (int)ret;
930 930 } else {
931 931 data = index_deref(self, rev);
932 932 if (data == NULL) {
933 933 return -2;
934 934 }
935 935
936 936 return getbe32(data + 16);
937 937 }
938 938 }
939 939
940 940 static PyObject *index_deltachain(indexObject *self, PyObject *args)
941 941 {
942 942 int rev, generaldelta;
943 943 PyObject *stoparg;
944 944 int stoprev, iterrev, baserev = -1;
945 945 int stopped;
946 946 PyObject *chain = NULL, *result = NULL;
947 947 const Py_ssize_t length = index_length(self);
948 948
949 949 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
950 950 return NULL;
951 951 }
952 952
953 953 if (PyInt_Check(stoparg)) {
954 954 stoprev = (int)PyInt_AsLong(stoparg);
955 955 if (stoprev == -1 && PyErr_Occurred()) {
956 956 return NULL;
957 957 }
958 958 } else if (stoparg == Py_None) {
959 959 stoprev = -2;
960 960 } else {
961 961 PyErr_SetString(PyExc_ValueError,
962 962 "stoprev must be integer or None");
963 963 return NULL;
964 964 }
965 965
966 966 if (rev < 0 || rev >= length) {
967 967 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
968 968 return NULL;
969 969 }
970 970
971 971 chain = PyList_New(0);
972 972 if (chain == NULL) {
973 973 return NULL;
974 974 }
975 975
976 976 baserev = index_baserev(self, rev);
977 977
978 978 /* This should never happen. */
979 979 if (baserev <= -2) {
980 980 /* Error should be set by index_deref() */
981 981 assert(PyErr_Occurred());
982 982 goto bail;
983 983 }
984 984
985 985 iterrev = rev;
986 986
987 987 while (iterrev != baserev && iterrev != stoprev) {
988 988 PyObject *value = PyInt_FromLong(iterrev);
989 989 if (value == NULL) {
990 990 goto bail;
991 991 }
992 992 if (PyList_Append(chain, value)) {
993 993 Py_DECREF(value);
994 994 goto bail;
995 995 }
996 996 Py_DECREF(value);
997 997
998 998 if (generaldelta) {
999 999 iterrev = baserev;
1000 1000 } else {
1001 1001 iterrev--;
1002 1002 }
1003 1003
1004 1004 if (iterrev < 0) {
1005 1005 break;
1006 1006 }
1007 1007
1008 1008 if (iterrev >= length) {
1009 1009 PyErr_SetString(PyExc_IndexError,
1010 1010 "revision outside index");
1011 1011 goto bail;
1012 1012 }
1013 1013
1014 1014 baserev = index_baserev(self, iterrev);
1015 1015
1016 1016 /* This should never happen. */
1017 1017 if (baserev <= -2) {
1018 1018 /* Error should be set by index_deref() */
1019 1019 assert(PyErr_Occurred());
1020 1020 goto bail;
1021 1021 }
1022 1022 }
1023 1023
1024 1024 if (iterrev == stoprev) {
1025 1025 stopped = 1;
1026 1026 } else {
1027 1027 PyObject *value = PyInt_FromLong(iterrev);
1028 1028 if (value == NULL) {
1029 1029 goto bail;
1030 1030 }
1031 1031 if (PyList_Append(chain, value)) {
1032 1032 Py_DECREF(value);
1033 1033 goto bail;
1034 1034 }
1035 1035 Py_DECREF(value);
1036 1036
1037 1037 stopped = 0;
1038 1038 }
1039 1039
1040 1040 if (PyList_Reverse(chain)) {
1041 1041 goto bail;
1042 1042 }
1043 1043
1044 1044 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
1045 1045 Py_DECREF(chain);
1046 1046 return result;
1047 1047
1048 1048 bail:
1049 1049 Py_DECREF(chain);
1050 1050 return NULL;
1051 1051 }
1052 1052
1053 static inline int64_t
1054 index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
1055 {
1056 int64_t start_offset;
1057 int64_t end_offset;
1058 int end_size;
1059 start_offset = index_get_start(self, start_rev);
1060 if (start_offset < 0) {
1061 return -1;
1062 }
1063 end_offset = index_get_start(self, end_rev);
1064 if (end_offset < 0) {
1065 return -1;
1066 }
1067 end_size = index_get_length(self, end_rev);
1068 if (end_size < 0) {
1069 return -1;
1070 }
1071 if (end_offset < start_offset) {
1072 PyErr_Format(PyExc_ValueError,
1073 "corrupted revlog index: inconsistent offset "
1074 "between revisions (%zd) and (%zd)",
1075 start_rev, end_rev);
1076 return -1;
1077 }
1078 return (end_offset - start_offset) + (int64_t)end_size;
1079 }
1080
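/* Illustrative sketch (editor addition, not part of this change): the span of
 * a segment [start_rev, end_rev] is the number of data-file bytes between the
 * start of start_rev and the end of end_rev. For example, start_rev at offset
 * 100 and end_rev at offset 400 with a 50-byte compressed entry give a span
 * of (400 - 100) + 50 = 350 bytes. A hypothetical caller: */
static inline int64_t
example_chain_span(indexObject *self, Py_ssize_t first_rev, Py_ssize_t last_rev)
{
	int64_t span = index_segment_span(self, first_rev, last_rev);
	if (span < 0) {
		return -1; /* a Python exception is already set */
	}
	return span;
}
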
1053 1081 static inline int nt_level(const char *node, Py_ssize_t level)
1054 1082 {
1055 1083 int v = node[level >> 1];
1056 1084 if (!(level & 1))
1057 1085 v >>= 4;
1058 1086 return v & 0xf;
1059 1087 }
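
/* Illustrative sketch (editor addition): nt_level() walks a binary node id
 * one hex nybble at a time. For a node whose first byte is 0xab, level 0
 * yields 0xa (high nybble) and level 1 yields 0xb (low nybble). A
 * hypothetical self-check: */
static inline int example_nt_level_demo(void)
{
	const char node[1] = {(char)0xab};
	return nt_level(node, 0) == 0xa && nt_level(node, 1) == 0xb;
}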
1060 1088
1061 1089 /*
1062 1090 * Return values:
1063 1091 *
1064 1092 * -4: match is ambiguous (multiple candidates)
1065 1093 * -2: not found
1066 1094 * rest: valid rev
1067 1095 */
1068 1096 static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
1069 1097 int hex)
1070 1098 {
1071 1099 int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
1072 1100 int level, maxlevel, off;
1073 1101
1074 1102 if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
1075 1103 return -1;
1076 1104
1077 1105 if (hex)
1078 1106 maxlevel = nodelen > 40 ? 40 : (int)nodelen;
1079 1107 else
1080 1108 maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);
1081 1109
1082 1110 for (level = off = 0; level < maxlevel; level++) {
1083 1111 int k = getnybble(node, level);
1084 1112 nodetreenode *n = &self->nodes[off];
1085 1113 int v = n->children[k];
1086 1114
1087 1115 if (v < 0) {
1088 1116 const char *n;
1089 1117 Py_ssize_t i;
1090 1118
1091 1119 v = -(v + 2);
1092 1120 n = index_node(self->index, v);
1093 1121 if (n == NULL)
1094 1122 return -2;
1095 1123 for (i = level; i < maxlevel; i++)
1096 1124 if (getnybble(node, i) != nt_level(n, i))
1097 1125 return -2;
1098 1126 return v;
1099 1127 }
1100 1128 if (v == 0)
1101 1129 return -2;
1102 1130 off = v;
1103 1131 }
1104 1132 /* multiple matches against an ambiguous prefix */
1105 1133 return -4;
1106 1134 }
1107 1135
1108 1136 static int nt_new(nodetree *self)
1109 1137 {
1110 1138 if (self->length == self->capacity) {
1111 1139 unsigned newcapacity;
1112 1140 nodetreenode *newnodes;
1113 1141 newcapacity = self->capacity * 2;
1114 1142 if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
1115 1143 PyErr_SetString(PyExc_MemoryError,
1116 1144 "overflow in nt_new");
1117 1145 return -1;
1118 1146 }
1119 1147 newnodes =
1120 1148 realloc(self->nodes, newcapacity * sizeof(nodetreenode));
1121 1149 if (newnodes == NULL) {
1122 1150 PyErr_SetString(PyExc_MemoryError, "out of memory");
1123 1151 return -1;
1124 1152 }
1125 1153 self->capacity = newcapacity;
1126 1154 self->nodes = newnodes;
1127 1155 memset(&self->nodes[self->length], 0,
1128 1156 sizeof(nodetreenode) * (self->capacity - self->length));
1129 1157 }
1130 1158 return self->length++;
1131 1159 }
1132 1160
1133 1161 static int nt_insert(nodetree *self, const char *node, int rev)
1134 1162 {
1135 1163 int level = 0;
1136 1164 int off = 0;
1137 1165
1138 1166 while (level < 40) {
1139 1167 int k = nt_level(node, level);
1140 1168 nodetreenode *n;
1141 1169 int v;
1142 1170
1143 1171 n = &self->nodes[off];
1144 1172 v = n->children[k];
1145 1173
1146 1174 if (v == 0) {
1147 1175 n->children[k] = -rev - 2;
1148 1176 return 0;
1149 1177 }
1150 1178 if (v < 0) {
1151 1179 const char *oldnode =
1152 1180 index_node_existing(self->index, -(v + 2));
1153 1181 int noff;
1154 1182
1155 1183 if (oldnode == NULL)
1156 1184 return -1;
1157 1185 if (!memcmp(oldnode, node, 20)) {
1158 1186 n->children[k] = -rev - 2;
1159 1187 return 0;
1160 1188 }
1161 1189 noff = nt_new(self);
1162 1190 if (noff == -1)
1163 1191 return -1;
1164 1192 /* self->nodes may have been changed by realloc */
1165 1193 self->nodes[off].children[k] = noff;
1166 1194 off = noff;
1167 1195 n = &self->nodes[off];
1168 1196 n->children[nt_level(oldnode, ++level)] = v;
1169 1197 if (level > self->depth)
1170 1198 self->depth = level;
1171 1199 self->splits += 1;
1172 1200 } else {
1173 1201 level += 1;
1174 1202 off = v;
1175 1203 }
1176 1204 }
1177 1205
1178 1206 return -1;
1179 1207 }
1180 1208
1181 1209 static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
1182 1210 {
1183 1211 Py_ssize_t rev;
1184 1212 const char *node;
1185 1213 Py_ssize_t length;
1186 1214 if (!PyArg_ParseTuple(args, "n", &rev))
1187 1215 return NULL;
1188 1216 length = index_length(self->nt.index);
1189 1217 if (rev < 0 || rev >= length) {
1190 1218 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1191 1219 return NULL;
1192 1220 }
1193 1221 node = index_node_existing(self->nt.index, rev);
1194 1222 if (nt_insert(&self->nt, node, (int)rev) == -1)
1195 1223 return NULL;
1196 1224 Py_RETURN_NONE;
1197 1225 }
1198 1226
1199 1227 static int nt_delete_node(nodetree *self, const char *node)
1200 1228 {
1201 1229 /* rev==-2 happens to get encoded as 0, which is interpreted as not set
1202 1230 */
1203 1231 return nt_insert(self, node, -2);
1204 1232 }
1205 1233
1206 1234 static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
1207 1235 {
1208 1236 /* Initialize before overflow-checking to avoid nt_dealloc() crash. */
1209 1237 self->nodes = NULL;
1210 1238
1211 1239 self->index = index;
1212 1240 /* The input capacity is in terms of revisions, while the field is in
1213 1241 * terms of nodetree nodes. */
1214 1242 self->capacity = (capacity < 4 ? 4 : capacity / 2);
1215 1243 self->depth = 0;
1216 1244 self->splits = 0;
1217 1245 if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
1218 1246 PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
1219 1247 return -1;
1220 1248 }
1221 1249 self->nodes = calloc(self->capacity, sizeof(nodetreenode));
1222 1250 if (self->nodes == NULL) {
1223 1251 PyErr_NoMemory();
1224 1252 return -1;
1225 1253 }
1226 1254 self->length = 1;
1227 1255 return 0;
1228 1256 }
1229 1257
1230 1258 static PyTypeObject indexType;
1231 1259
1232 1260 static int ntobj_init(nodetreeObject *self, PyObject *args)
1233 1261 {
1234 1262 PyObject *index;
1235 1263 unsigned capacity;
1236 1264 if (!PyArg_ParseTuple(args, "O!I", &indexType, &index, &capacity))
1237 1265 return -1;
1238 1266 Py_INCREF(index);
1239 1267 return nt_init(&self->nt, (indexObject *)index, capacity);
1240 1268 }
1241 1269
1242 1270 static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
1243 1271 {
1244 1272 return nt_find(self, node, nodelen, 1);
1245 1273 }
1246 1274
1247 1275 /*
1248 1276 * Find the length of the shortest unique prefix of node.
1249 1277 *
1250 1278 * Return values:
1251 1279 *
1252 1280 * -3: error (exception set)
1253 1281 * -2: not found (no exception set)
1254 1282 * rest: length of shortest prefix
1255 1283 */
1256 1284 static int nt_shortest(nodetree *self, const char *node)
1257 1285 {
1258 1286 int level, off;
1259 1287
1260 1288 for (level = off = 0; level < 40; level++) {
1261 1289 int k, v;
1262 1290 nodetreenode *n = &self->nodes[off];
1263 1291 k = nt_level(node, level);
1264 1292 v = n->children[k];
1265 1293 if (v < 0) {
1266 1294 const char *n;
1267 1295 v = -(v + 2);
1268 1296 n = index_node_existing(self->index, v);
1269 1297 if (n == NULL)
1270 1298 return -3;
1271 1299 if (memcmp(node, n, 20) != 0)
1272 1300 /*
1273 1301 * Found a unique prefix, but it wasn't for the
1274 1302 * requested node (i.e. the requested node does
1275 1303 * not exist).
1276 1304 */
1277 1305 return -2;
1278 1306 return level + 1;
1279 1307 }
1280 1308 if (v == 0)
1281 1309 return -2;
1282 1310 off = v;
1283 1311 }
1284 1312 /*
1285 1313 * The node was still not unique after 40 hex digits, so this won't
1286 1314 * happen. Also, if we get here, then there's a programming error in
1287 1315 * this file that made us insert a node longer than 40 hex digits.
1288 1316 */
1289 1317 PyErr_SetString(PyExc_Exception, "broken node tree");
1290 1318 return -3;
1291 1319 }
1292 1320
1293 1321 static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
1294 1322 {
1295 1323 PyObject *val;
1296 1324 char *node;
1297 1325 int length;
1298 1326
1299 1327 if (!PyArg_ParseTuple(args, "O", &val))
1300 1328 return NULL;
1301 1329 if (node_check(val, &node) == -1)
1302 1330 return NULL;
1303 1331
1304 1332 length = nt_shortest(&self->nt, node);
1305 1333 if (length == -3)
1306 1334 return NULL;
1307 1335 if (length == -2) {
1308 1336 raise_revlog_error();
1309 1337 return NULL;
1310 1338 }
1311 1339 return PyInt_FromLong(length);
1312 1340 }
1313 1341
1314 1342 static void nt_dealloc(nodetree *self)
1315 1343 {
1316 1344 free(self->nodes);
1317 1345 self->nodes = NULL;
1318 1346 }
1319 1347
1320 1348 static void ntobj_dealloc(nodetreeObject *self)
1321 1349 {
1322 1350 Py_XDECREF(self->nt.index);
1323 1351 nt_dealloc(&self->nt);
1324 1352 PyObject_Del(self);
1325 1353 }
1326 1354
1327 1355 static PyMethodDef ntobj_methods[] = {
1328 1356 {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
1329 1357 "insert an index entry"},
1330 1358 {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
1331 1359 "find length of shortest hex nodeid of a binary ID"},
1332 1360 {NULL} /* Sentinel */
1333 1361 };
1334 1362
1335 1363 static PyTypeObject nodetreeType = {
1336 1364 PyVarObject_HEAD_INIT(NULL, 0) /* header */
1337 1365 "parsers.nodetree", /* tp_name */
1338 1366 sizeof(nodetreeObject), /* tp_basicsize */
1339 1367 0, /* tp_itemsize */
1340 1368 (destructor)ntobj_dealloc, /* tp_dealloc */
1341 1369 0, /* tp_print */
1342 1370 0, /* tp_getattr */
1343 1371 0, /* tp_setattr */
1344 1372 0, /* tp_compare */
1345 1373 0, /* tp_repr */
1346 1374 0, /* tp_as_number */
1347 1375 0, /* tp_as_sequence */
1348 1376 0, /* tp_as_mapping */
1349 1377 0, /* tp_hash */
1350 1378 0, /* tp_call */
1351 1379 0, /* tp_str */
1352 1380 0, /* tp_getattro */
1353 1381 0, /* tp_setattro */
1354 1382 0, /* tp_as_buffer */
1355 1383 Py_TPFLAGS_DEFAULT, /* tp_flags */
1356 1384 "nodetree", /* tp_doc */
1357 1385 0, /* tp_traverse */
1358 1386 0, /* tp_clear */
1359 1387 0, /* tp_richcompare */
1360 1388 0, /* tp_weaklistoffset */
1361 1389 0, /* tp_iter */
1362 1390 0, /* tp_iternext */
1363 1391 ntobj_methods, /* tp_methods */
1364 1392 0, /* tp_members */
1365 1393 0, /* tp_getset */
1366 1394 0, /* tp_base */
1367 1395 0, /* tp_dict */
1368 1396 0, /* tp_descr_get */
1369 1397 0, /* tp_descr_set */
1370 1398 0, /* tp_dictoffset */
1371 1399 (initproc)ntobj_init, /* tp_init */
1372 1400 0, /* tp_alloc */
1373 1401 };
1374 1402
1375 1403 static int index_init_nt(indexObject *self)
1376 1404 {
1377 1405 if (!self->ntinitialized) {
1378 1406 if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
1379 1407 nt_dealloc(&self->nt);
1380 1408 return -1;
1381 1409 }
1382 1410 if (nt_insert(&self->nt, nullid, -1) == -1) {
1383 1411 nt_dealloc(&self->nt);
1384 1412 return -1;
1385 1413 }
1386 1414 self->ntinitialized = 1;
1387 1415 self->ntrev = (int)index_length(self);
1388 1416 self->ntlookups = 1;
1389 1417 self->ntmisses = 0;
1390 1418 }
1391 1419 return 0;
1392 1420 }
1393 1421
1394 1422 /*
1395 1423 * Return values:
1396 1424 *
1397 1425 * -3: error (exception set)
1398 1426 * -2: not found (no exception set)
1399 1427 * rest: valid rev
1400 1428 */
1401 1429 static int index_find_node(indexObject *self, const char *node,
1402 1430 Py_ssize_t nodelen)
1403 1431 {
1404 1432 int rev;
1405 1433
1406 1434 if (index_init_nt(self) == -1)
1407 1435 return -3;
1408 1436
1409 1437 self->ntlookups++;
1410 1438 rev = nt_find(&self->nt, node, nodelen, 0);
1411 1439 if (rev >= -1)
1412 1440 return rev;
1413 1441
1414 1442 /*
1415 1443 * For the first handful of lookups, we scan the entire index,
1416 1444 * and cache only the matching nodes. This optimizes for cases
1417 1445 * like "hg tip", where only a few nodes are accessed.
1418 1446 *
1419 1447 * After that, we cache every node we visit, using a single
1420 1448 * scan amortized over multiple lookups. This gives the best
1421 1449 * bulk performance, e.g. for "hg log".
1422 1450 */
1423 1451 if (self->ntmisses++ < 4) {
1424 1452 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1425 1453 const char *n = index_node_existing(self, rev);
1426 1454 if (n == NULL)
1427 1455 return -3;
1428 1456 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1429 1457 if (nt_insert(&self->nt, n, rev) == -1)
1430 1458 return -3;
1431 1459 break;
1432 1460 }
1433 1461 }
1434 1462 } else {
1435 1463 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1436 1464 const char *n = index_node_existing(self, rev);
1437 1465 if (n == NULL)
1438 1466 return -3;
1439 1467 if (nt_insert(&self->nt, n, rev) == -1) {
1440 1468 self->ntrev = rev + 1;
1441 1469 return -3;
1442 1470 }
1443 1471 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1444 1472 break;
1445 1473 }
1446 1474 }
1447 1475 self->ntrev = rev;
1448 1476 }
1449 1477
1450 1478 if (rev >= 0)
1451 1479 return rev;
1452 1480 return -2;
1453 1481 }
1454 1482
1455 1483 static PyObject *index_getitem(indexObject *self, PyObject *value)
1456 1484 {
1457 1485 char *node;
1458 1486 int rev;
1459 1487
1460 1488 if (PyInt_Check(value)) {
1461 1489 long idx;
1462 1490 if (!pylong_to_long(value, &idx)) {
1463 1491 return NULL;
1464 1492 }
1465 1493 return index_get(self, idx);
1466 1494 }
1467 1495
1468 1496 if (node_check(value, &node) == -1)
1469 1497 return NULL;
1470 1498 rev = index_find_node(self, node, 20);
1471 1499 if (rev >= -1)
1472 1500 return PyInt_FromLong(rev);
1473 1501 if (rev == -2)
1474 1502 raise_revlog_error();
1475 1503 return NULL;
1476 1504 }
1477 1505
1478 1506 /*
1479 1507 * Fully populate the radix tree.
1480 1508 */
1481 1509 static int index_populate_nt(indexObject *self)
1482 1510 {
1483 1511 int rev;
1484 1512 if (self->ntrev > 0) {
1485 1513 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1486 1514 const char *n = index_node_existing(self, rev);
1487 1515 if (n == NULL)
1488 1516 return -1;
1489 1517 if (nt_insert(&self->nt, n, rev) == -1)
1490 1518 return -1;
1491 1519 }
1492 1520 self->ntrev = -1;
1493 1521 }
1494 1522 return 0;
1495 1523 }
1496 1524
1497 1525 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
1498 1526 {
1499 1527 const char *fullnode;
1500 1528 int nodelen;
1501 1529 char *node;
1502 1530 int rev, i;
1503 1531
1504 1532 if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
1505 1533 return NULL;
1506 1534
1507 1535 if (nodelen < 1) {
1508 1536 PyErr_SetString(PyExc_ValueError, "key too short");
1509 1537 return NULL;
1510 1538 }
1511 1539
1512 1540 if (nodelen > 40) {
1513 1541 PyErr_SetString(PyExc_ValueError, "key too long");
1514 1542 return NULL;
1515 1543 }
1516 1544
1517 1545 for (i = 0; i < nodelen; i++)
1518 1546 hexdigit(node, i);
1519 1547 if (PyErr_Occurred()) {
1520 1548 /* input contains non-hex characters */
1521 1549 PyErr_Clear();
1522 1550 Py_RETURN_NONE;
1523 1551 }
1524 1552
1525 1553 if (index_init_nt(self) == -1)
1526 1554 return NULL;
1527 1555 if (index_populate_nt(self) == -1)
1528 1556 return NULL;
1529 1557 rev = nt_partialmatch(&self->nt, node, nodelen);
1530 1558
1531 1559 switch (rev) {
1532 1560 case -4:
1533 1561 raise_revlog_error();
1534 1562 return NULL;
1535 1563 case -2:
1536 1564 Py_RETURN_NONE;
1537 1565 case -1:
1538 1566 return PyBytes_FromStringAndSize(nullid, 20);
1539 1567 }
1540 1568
1541 1569 fullnode = index_node_existing(self, rev);
1542 1570 if (fullnode == NULL) {
1543 1571 return NULL;
1544 1572 }
1545 1573 return PyBytes_FromStringAndSize(fullnode, 20);
1546 1574 }
1547 1575
1548 1576 static PyObject *index_shortest(indexObject *self, PyObject *args)
1549 1577 {
1550 1578 PyObject *val;
1551 1579 char *node;
1552 1580 int length;
1553 1581
1554 1582 if (!PyArg_ParseTuple(args, "O", &val))
1555 1583 return NULL;
1556 1584 if (node_check(val, &node) == -1)
1557 1585 return NULL;
1558 1586
1559 1587 self->ntlookups++;
1560 1588 if (index_init_nt(self) == -1)
1561 1589 return NULL;
1562 1590 if (index_populate_nt(self) == -1)
1563 1591 return NULL;
1564 1592 length = nt_shortest(&self->nt, node);
1565 1593 if (length == -3)
1566 1594 return NULL;
1567 1595 if (length == -2) {
1568 1596 raise_revlog_error();
1569 1597 return NULL;
1570 1598 }
1571 1599 return PyInt_FromLong(length);
1572 1600 }
1573 1601
1574 1602 static PyObject *index_m_get(indexObject *self, PyObject *args)
1575 1603 {
1576 1604 PyObject *val;
1577 1605 char *node;
1578 1606 int rev;
1579 1607
1580 1608 if (!PyArg_ParseTuple(args, "O", &val))
1581 1609 return NULL;
1582 1610 if (node_check(val, &node) == -1)
1583 1611 return NULL;
1584 1612 rev = index_find_node(self, node, 20);
1585 1613 if (rev == -3)
1586 1614 return NULL;
1587 1615 if (rev == -2)
1588 1616 Py_RETURN_NONE;
1589 1617 return PyInt_FromLong(rev);
1590 1618 }
1591 1619
1592 1620 static int index_contains(indexObject *self, PyObject *value)
1593 1621 {
1594 1622 char *node;
1595 1623
1596 1624 if (PyInt_Check(value)) {
1597 1625 long rev;
1598 1626 if (!pylong_to_long(value, &rev)) {
1599 1627 return -1;
1600 1628 }
1601 1629 return rev >= -1 && rev < index_length(self);
1602 1630 }
1603 1631
1604 1632 if (node_check(value, &node) == -1)
1605 1633 return -1;
1606 1634
1607 1635 switch (index_find_node(self, node, 20)) {
1608 1636 case -3:
1609 1637 return -1;
1610 1638 case -2:
1611 1639 return 0;
1612 1640 default:
1613 1641 return 1;
1614 1642 }
1615 1643 }
1616 1644
1617 1645 typedef uint64_t bitmask;
1618 1646
1619 1647 /*
1620 1648 * Given a disjoint set of revs, return all candidates for the
1621 1649 * greatest common ancestor. In revset notation, this is the set
1622 1650 * "heads(::a and ::b and ...)"
1623 1651 */
1624 1652 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
1625 1653 int revcount)
1626 1654 {
1627 1655 const bitmask allseen = (1ull << revcount) - 1;
1628 1656 const bitmask poison = 1ull << revcount;
1629 1657 PyObject *gca = PyList_New(0);
1630 1658 int i, v, interesting;
1631 1659 int maxrev = -1;
1632 1660 bitmask sp;
1633 1661 bitmask *seen;
1634 1662
1635 1663 if (gca == NULL)
1636 1664 return PyErr_NoMemory();
1637 1665
1638 1666 for (i = 0; i < revcount; i++) {
1639 1667 if (revs[i] > maxrev)
1640 1668 maxrev = revs[i];
1641 1669 }
1642 1670
1643 1671 seen = calloc(sizeof(*seen), maxrev + 1);
1644 1672 if (seen == NULL) {
1645 1673 Py_DECREF(gca);
1646 1674 return PyErr_NoMemory();
1647 1675 }
1648 1676
1649 1677 for (i = 0; i < revcount; i++)
1650 1678 seen[revs[i]] = 1ull << i;
1651 1679
1652 1680 interesting = revcount;
1653 1681
1654 1682 for (v = maxrev; v >= 0 && interesting; v--) {
1655 1683 bitmask sv = seen[v];
1656 1684 int parents[2];
1657 1685
1658 1686 if (!sv)
1659 1687 continue;
1660 1688
1661 1689 if (sv < poison) {
1662 1690 interesting -= 1;
1663 1691 if (sv == allseen) {
1664 1692 PyObject *obj = PyInt_FromLong(v);
1665 1693 if (obj == NULL)
1666 1694 goto bail;
1667 1695 if (PyList_Append(gca, obj) == -1) {
1668 1696 Py_DECREF(obj);
1669 1697 goto bail;
1670 1698 }
1671 1699 sv |= poison;
1672 1700 for (i = 0; i < revcount; i++) {
1673 1701 if (revs[i] == v)
1674 1702 goto done;
1675 1703 }
1676 1704 }
1677 1705 }
1678 1706 if (index_get_parents(self, v, parents, maxrev) < 0)
1679 1707 goto bail;
1680 1708
1681 1709 for (i = 0; i < 2; i++) {
1682 1710 int p = parents[i];
1683 1711 if (p == -1)
1684 1712 continue;
1685 1713 sp = seen[p];
1686 1714 if (sv < poison) {
1687 1715 if (sp == 0) {
1688 1716 seen[p] = sv;
1689 1717 interesting++;
1690 1718 } else if (sp != sv)
1691 1719 seen[p] |= sv;
1692 1720 } else {
1693 1721 if (sp && sp < poison)
1694 1722 interesting--;
1695 1723 seen[p] = sv;
1696 1724 }
1697 1725 }
1698 1726 }
1699 1727
1700 1728 done:
1701 1729 free(seen);
1702 1730 return gca;
1703 1731 bail:
1704 1732 free(seen);
1705 1733 Py_XDECREF(gca);
1706 1734 return NULL;
1707 1735 }
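
/* Illustrative sketch (editor addition): with revcount == 2 the bookkeeping
 * above uses allseen == 0x3 and poison == 0x4; a revision whose seen bits
 * equal allseen is an ancestor of every input and becomes a GCA candidate,
 * after which it is poisoned so its own ancestors are skipped. Hypothetical
 * restatement of that test: */
static inline int example_is_gca_candidate(bitmask seen_bits, int revcount)
{
	const bitmask allseen = (1ull << revcount) - 1;
	const bitmask poison = 1ull << revcount;
	return seen_bits < poison && seen_bits == allseen;
}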
1708 1736
1709 1737 /*
1710 1738 * Given a disjoint set of revs, return the subset with the longest
1711 1739 * path to the root.
1712 1740 */
1713 1741 static PyObject *find_deepest(indexObject *self, PyObject *revs)
1714 1742 {
1715 1743 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
1716 1744 static const Py_ssize_t capacity = 24;
1717 1745 int *depth, *interesting = NULL;
1718 1746 int i, j, v, ninteresting;
1719 1747 PyObject *dict = NULL, *keys = NULL;
1720 1748 long *seen = NULL;
1721 1749 int maxrev = -1;
1722 1750 long final;
1723 1751
1724 1752 if (revcount > capacity) {
1725 1753 PyErr_Format(PyExc_OverflowError,
1726 1754 "bitset size (%ld) > capacity (%ld)",
1727 1755 (long)revcount, (long)capacity);
1728 1756 return NULL;
1729 1757 }
1730 1758
1731 1759 for (i = 0; i < revcount; i++) {
1732 1760 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
1733 1761 if (n > maxrev)
1734 1762 maxrev = n;
1735 1763 }
1736 1764
1737 1765 depth = calloc(sizeof(*depth), maxrev + 1);
1738 1766 if (depth == NULL)
1739 1767 return PyErr_NoMemory();
1740 1768
1741 1769 seen = calloc(sizeof(*seen), maxrev + 1);
1742 1770 if (seen == NULL) {
1743 1771 PyErr_NoMemory();
1744 1772 goto bail;
1745 1773 }
1746 1774
1747 1775 interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
1748 1776 if (interesting == NULL) {
1749 1777 PyErr_NoMemory();
1750 1778 goto bail;
1751 1779 }
1752 1780
1753 1781 if (PyList_Sort(revs) == -1)
1754 1782 goto bail;
1755 1783
1756 1784 for (i = 0; i < revcount; i++) {
1757 1785 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
1758 1786 long b = 1l << i;
1759 1787 depth[n] = 1;
1760 1788 seen[n] = b;
1761 1789 interesting[b] = 1;
1762 1790 }
1763 1791
1764 1792 /* invariant: ninteresting is the number of non-zero entries in
1765 1793 * interesting. */
1766 1794 ninteresting = (int)revcount;
1767 1795
1768 1796 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
1769 1797 int dv = depth[v];
1770 1798 int parents[2];
1771 1799 long sv;
1772 1800
1773 1801 if (dv == 0)
1774 1802 continue;
1775 1803
1776 1804 sv = seen[v];
1777 1805 if (index_get_parents(self, v, parents, maxrev) < 0)
1778 1806 goto bail;
1779 1807
1780 1808 for (i = 0; i < 2; i++) {
1781 1809 int p = parents[i];
1782 1810 long sp;
1783 1811 int dp;
1784 1812
1785 1813 if (p == -1)
1786 1814 continue;
1787 1815
1788 1816 dp = depth[p];
1789 1817 sp = seen[p];
1790 1818 if (dp <= dv) {
1791 1819 depth[p] = dv + 1;
1792 1820 if (sp != sv) {
1793 1821 interesting[sv] += 1;
1794 1822 seen[p] = sv;
1795 1823 if (sp) {
1796 1824 interesting[sp] -= 1;
1797 1825 if (interesting[sp] == 0)
1798 1826 ninteresting -= 1;
1799 1827 }
1800 1828 }
1801 1829 } else if (dv == dp - 1) {
1802 1830 long nsp = sp | sv;
1803 1831 if (nsp == sp)
1804 1832 continue;
1805 1833 seen[p] = nsp;
1806 1834 interesting[sp] -= 1;
1807 1835 if (interesting[sp] == 0)
1808 1836 ninteresting -= 1;
1809 1837 if (interesting[nsp] == 0)
1810 1838 ninteresting += 1;
1811 1839 interesting[nsp] += 1;
1812 1840 }
1813 1841 }
1814 1842 interesting[sv] -= 1;
1815 1843 if (interesting[sv] == 0)
1816 1844 ninteresting -= 1;
1817 1845 }
1818 1846
1819 1847 final = 0;
1820 1848 j = ninteresting;
1821 1849 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
1822 1850 if (interesting[i] == 0)
1823 1851 continue;
1824 1852 final |= i;
1825 1853 j -= 1;
1826 1854 }
1827 1855 if (final == 0) {
1828 1856 keys = PyList_New(0);
1829 1857 goto bail;
1830 1858 }
1831 1859
1832 1860 dict = PyDict_New();
1833 1861 if (dict == NULL)
1834 1862 goto bail;
1835 1863
1836 1864 for (i = 0; i < revcount; i++) {
1837 1865 PyObject *key;
1838 1866
1839 1867 if ((final & (1 << i)) == 0)
1840 1868 continue;
1841 1869
1842 1870 key = PyList_GET_ITEM(revs, i);
1843 1871 Py_INCREF(key);
1844 1872 Py_INCREF(Py_None);
1845 1873 if (PyDict_SetItem(dict, key, Py_None) == -1) {
1846 1874 Py_DECREF(key);
1847 1875 Py_DECREF(Py_None);
1848 1876 goto bail;
1849 1877 }
1850 1878 }
1851 1879
1852 1880 keys = PyDict_Keys(dict);
1853 1881
1854 1882 bail:
1855 1883 free(depth);
1856 1884 free(seen);
1857 1885 free(interesting);
1858 1886 Py_XDECREF(dict);
1859 1887
1860 1888 return keys;
1861 1889 }
1862 1890
1863 1891 /*
1864 1892 * Given a (possibly overlapping) set of revs, return all the
1865 1893 * common ancestor heads: heads(::args[0] and ::args[1] and ...)
1866 1894 */
1867 1895 static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
1868 1896 {
1869 1897 PyObject *ret = NULL;
1870 1898 Py_ssize_t argcount, i, len;
1871 1899 bitmask repeat = 0;
1872 1900 int revcount = 0;
1873 1901 int *revs;
1874 1902
1875 1903 argcount = PySequence_Length(args);
1876 1904 revs = PyMem_Malloc(argcount * sizeof(*revs));
1877 1905 if (argcount > 0 && revs == NULL)
1878 1906 return PyErr_NoMemory();
1879 1907 len = index_length(self);
1880 1908
1881 1909 for (i = 0; i < argcount; i++) {
1882 1910 static const int capacity = 24;
1883 1911 PyObject *obj = PySequence_GetItem(args, i);
1884 1912 bitmask x;
1885 1913 long val;
1886 1914
1887 1915 if (!PyInt_Check(obj)) {
1888 1916 PyErr_SetString(PyExc_TypeError,
1889 1917 "arguments must all be ints");
1890 1918 Py_DECREF(obj);
1891 1919 goto bail;
1892 1920 }
1893 1921 val = PyInt_AsLong(obj);
1894 1922 Py_DECREF(obj);
1895 1923 if (val == -1) {
1896 1924 ret = PyList_New(0);
1897 1925 goto done;
1898 1926 }
1899 1927 if (val < 0 || val >= len) {
1900 1928 PyErr_SetString(PyExc_IndexError, "index out of range");
1901 1929 goto bail;
1902 1930 }
1903 1931 /* this cheesy bloom filter lets us avoid some more
1904 1932 * expensive duplicate checks in the common set-is-disjoint
1905 1933 * case */
1906 1934 x = 1ull << (val & 0x3f);
1907 1935 if (repeat & x) {
1908 1936 int k;
1909 1937 for (k = 0; k < revcount; k++) {
1910 1938 if (val == revs[k])
1911 1939 goto duplicate;
1912 1940 }
1913 1941 } else
1914 1942 repeat |= x;
1915 1943 if (revcount >= capacity) {
1916 1944 PyErr_Format(PyExc_OverflowError,
1917 1945 "bitset size (%d) > capacity (%d)",
1918 1946 revcount, capacity);
1919 1947 goto bail;
1920 1948 }
1921 1949 revs[revcount++] = (int)val;
1922 1950 duplicate:;
1923 1951 }
1924 1952
1925 1953 if (revcount == 0) {
1926 1954 ret = PyList_New(0);
1927 1955 goto done;
1928 1956 }
1929 1957 if (revcount == 1) {
1930 1958 PyObject *obj;
1931 1959 ret = PyList_New(1);
1932 1960 if (ret == NULL)
1933 1961 goto bail;
1934 1962 obj = PyInt_FromLong(revs[0]);
1935 1963 if (obj == NULL)
1936 1964 goto bail;
1937 1965 PyList_SET_ITEM(ret, 0, obj);
1938 1966 goto done;
1939 1967 }
1940 1968
1941 1969 ret = find_gca_candidates(self, revs, revcount);
1942 1970 if (ret == NULL)
1943 1971 goto bail;
1944 1972
1945 1973 done:
1946 1974 PyMem_Free(revs);
1947 1975 return ret;
1948 1976
1949 1977 bail:
1950 1978 PyMem_Free(revs);
1951 1979 Py_XDECREF(ret);
1952 1980 return NULL;
1953 1981 }
1954 1982
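/*
 * Editor's illustration (not part of the upstream file, compiled out):
 * the duplicate check above keys a 64-bit filter on the low 6 bits of
 * each rev, so the O(n) rescan only runs on a possible collision.  A
 * minimal sketch of that test, with hypothetical names:
 */
#if 0
static int example_seen_before(unsigned long long *filter, const int *vals,
                               int count, int val)
{
	unsigned long long bit = 1ull << (val & 0x3f);
	int k;
	if (!(*filter & bit)) {
		*filter |= bit; /* low bits never seen: definitely new */
		return 0;
	}
	for (k = 0; k < count; k++) /* possible collision: confirm by scan */
		if (vals[k] == val)
			return 1;
	return 0;
}
#endif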
1955 1983 /*
1956 1984 * Given a (possibly overlapping) set of revs, return the greatest
1957 1985 * common ancestors: those with the longest path to the root.
1958 1986 */
1959 1987 static PyObject *index_ancestors(indexObject *self, PyObject *args)
1960 1988 {
1961 1989 PyObject *ret;
1962 1990 PyObject *gca = index_commonancestorsheads(self, args);
1963 1991 if (gca == NULL)
1964 1992 return NULL;
1965 1993
1966 1994 if (PyList_GET_SIZE(gca) <= 1) {
1967 1995 return gca;
1968 1996 }
1969 1997
1970 1998 ret = find_deepest(self, gca);
1971 1999 Py_DECREF(gca);
1972 2000 return ret;
1973 2001 }
1974 2002
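/*
 * Editor's illustration (not part of the upstream file, compiled out):
 * from Python this is reached as idx.ancestors(a, b).  A rough C-level
 * equivalent with two hypothetical revisions, assuming "idx" is an
 * initialized indexObject*:
 */
#if 0
static PyObject *example_deepest_gca(indexObject *idx)
{
	PyObject *args = Py_BuildValue("(ii)", 4, 7); /* hypothetical revs */
	PyObject *res;
	if (args == NULL)
		return NULL;
	/* list of the deepest common ancestors of revs 4 and 7 */
	res = index_ancestors(idx, args);
	Py_DECREF(args);
	return res;
}
#endif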
1975 2003 /*
1976 2004 * Invalidate any trie entries introduced by added revs.
1977 2005 */
1978 2006 static void index_invalidate_added(indexObject *self, Py_ssize_t start)
1979 2007 {
1980 2008 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
1981 2009
1982 2010 for (i = start; i < len; i++) {
1983 2011 PyObject *tuple = PyList_GET_ITEM(self->added, i);
1984 2012 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
1985 2013
1986 2014 nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
1987 2015 }
1988 2016
1989 2017 if (start == 0)
1990 2018 Py_CLEAR(self->added);
1991 2019 }
1992 2020
1993 2021 /*
1994 2022 * Delete a numeric range of revs, which must lie at the end of the
1995 2023 * index; the trailing sentinel nullid entry is never deleted.
1996 2024 */
1997 2025 static int index_slice_del(indexObject *self, PyObject *item)
1998 2026 {
1999 2027 Py_ssize_t start, stop, step, slicelength;
2000 2028 Py_ssize_t length = index_length(self) + 1;
2001 2029 int ret = 0;
2002 2030
2003 2031 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
2004 2032 #ifdef IS_PY3K
2005 2033 if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
2006 2034 &slicelength) < 0)
2007 2035 #else
2008 2036 if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
2009 2037 &step, &slicelength) < 0)
2010 2038 #endif
2011 2039 return -1;
2012 2040
2013 2041 if (slicelength <= 0)
2014 2042 return 0;
2015 2043
2016 2044 if ((step < 0 && start < stop) || (step > 0 && start > stop))
2017 2045 stop = start;
2018 2046
2019 2047 if (step < 0) {
2020 2048 stop = start + 1;
2021 2049 start = stop + step * (slicelength - 1) - 1;
2022 2050 step = -step;
2023 2051 }
2024 2052
2025 2053 if (step != 1) {
2026 2054 PyErr_SetString(PyExc_ValueError,
2027 2055 "revlog index delete requires step size of 1");
2028 2056 return -1;
2029 2057 }
2030 2058
2031 2059 if (stop != length - 1) {
2032 2060 PyErr_SetString(PyExc_IndexError,
2033 2061 "revlog index deletion indices are invalid");
2034 2062 return -1;
2035 2063 }
2036 2064
2037 2065 if (start < self->length) {
2038 2066 if (self->ntinitialized) {
2039 2067 Py_ssize_t i;
2040 2068
2041 2069 for (i = start + 1; i < self->length; i++) {
2042 2070 const char *node = index_node_existing(self, i);
2043 2071 if (node == NULL)
2044 2072 return -1;
2045 2073
2046 2074 nt_delete_node(&self->nt, node);
2047 2075 }
2048 2076 if (self->added)
2049 2077 index_invalidate_added(self, 0);
2050 2078 if (self->ntrev > start)
2051 2079 self->ntrev = (int)start;
2052 2080 }
2053 2081 self->length = start;
2054 2082 if (start < self->raw_length) {
2055 2083 if (self->cache) {
2056 2084 Py_ssize_t i;
2057 2085 for (i = start; i < self->raw_length; i++)
2058 2086 Py_CLEAR(self->cache[i]);
2059 2087 }
2060 2088 self->raw_length = start;
2061 2089 }
2062 2090 goto done;
2063 2091 }
2064 2092
2065 2093 if (self->ntinitialized) {
2066 2094 index_invalidate_added(self, start - self->length);
2067 2095 if (self->ntrev > start)
2068 2096 self->ntrev = (int)start;
2069 2097 }
2070 2098 if (self->added)
2071 2099 ret = PyList_SetSlice(self->added, start - self->length,
2072 2100 PyList_GET_SIZE(self->added), NULL);
2073 2101 done:
2074 2102 Py_CLEAR(self->headrevs);
2075 2103 return ret;
2076 2104 }
2077 2105
2078 2106 /*
2079 2107 * Supported ops:
2080 2108 *
2081 2109 * slice deletion
2082 2110 * string assignment (extend node->rev mapping)
2083 2111 * string deletion (shrink node->rev mapping)
2084 2112 */
2085 2113 static int index_assign_subscript(indexObject *self, PyObject *item,
2086 2114 PyObject *value)
2087 2115 {
2088 2116 char *node;
2089 2117 long rev;
2090 2118
2091 2119 if (PySlice_Check(item) && value == NULL)
2092 2120 return index_slice_del(self, item);
2093 2121
2094 2122 if (node_check(item, &node) == -1)
2095 2123 return -1;
2096 2124
2097 2125 if (value == NULL)
2098 2126 return self->ntinitialized ? nt_delete_node(&self->nt, node)
2099 2127 : 0;
2100 2128 rev = PyInt_AsLong(value);
2101 2129 if (rev > INT_MAX || rev < 0) {
2102 2130 if (!PyErr_Occurred())
2103 2131 PyErr_SetString(PyExc_ValueError, "rev out of range");
2104 2132 return -1;
2105 2133 }
2106 2134
2107 2135 if (index_init_nt(self) == -1)
2108 2136 return -1;
2109 2137 return nt_insert(&self->nt, node, (int)rev);
2110 2138 }
2111 2139
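/*
 * Editor's illustration (not part of the upstream file, compiled out):
 * the three supported ops, driven through the generic CPython mapping
 * protocol.  "idx" (the index as a PyObject*), "node20" (a 20-byte bytes
 * object) and "rev" are hypothetical:
 */
#if 0
static int example_mapping_ops(PyObject *idx, PyObject *node20, long rev)
{
	PyObject *revobj = PyLong_FromLong(rev);
	PyObject *stop = PyLong_FromLong(-1);
	PyObject *slice = NULL;
	int ret = -1;
	if (revobj == NULL || stop == NULL)
		goto done;
	/* string assignment: extend the node->rev mapping */
	if (PyObject_SetItem(idx, node20, revobj) == -1)
		goto done;
	/* string deletion: shrink the node->rev mapping */
	if (PyObject_DelItem(idx, node20) == -1)
		goto done;
	/* slice deletion: del idx[rev:-1] truncates trailing revisions
	 * while keeping the nullid sentinel */
	slice = PySlice_New(revobj, stop, NULL);
	if (slice == NULL || PyObject_DelItem(idx, slice) == -1)
		goto done;
	ret = 0;
done:
	Py_XDECREF(revobj);
	Py_XDECREF(stop);
	Py_XDECREF(slice);
	return ret;
}
#endif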
2112 2140 /*
2113 2141 * Find all RevlogNG entries in an index that has inline data. Update
2114 2142 * the optional "offsets" table with those entries.
2115 2143 */
2116 2144 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
2117 2145 {
2118 2146 const char *data = (const char *)self->buf.buf;
2119 2147 Py_ssize_t pos = 0;
2120 2148 Py_ssize_t end = self->buf.len;
2121 2149 long incr = v1_hdrsize;
2122 2150 Py_ssize_t len = 0;
2123 2151
2124 2152 while (pos + v1_hdrsize <= end && pos >= 0) {
2125 2153 uint32_t comp_len;
2126 2154 /* 3rd element of header is length of compressed inline data */
2127 2155 comp_len = getbe32(data + pos + 8);
2128 2156 incr = v1_hdrsize + comp_len;
2129 2157 if (offsets)
2130 2158 offsets[len] = data + pos;
2131 2159 len++;
2132 2160 pos += incr;
2133 2161 }
2134 2162
2135 2163 if (pos != end) {
2136 2164 if (!PyErr_Occurred())
2137 2165 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2138 2166 return -1;
2139 2167 }
2140 2168
2141 2169 return len;
2142 2170 }
2143 2171
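/*
 * Editor's note (illustration, not upstream code, compiled out): the
 * "+ 8" above is the offset of the 32-bit compressed-length field in a
 * 64-byte v1 (RevlogNG) index record.  A sketch of pulling a few fields
 * out of one record; the offsets reflect my reading of the v1 layout
 * (bytes 0-5 data offset, 6-7 flags, then big-endian 32-bit fields):
 */
#if 0
static void example_decode_v1_record(const char *rec, uint32_t *comp_len,
                                     uint32_t *uncomp_len, int *link_rev)
{
	*comp_len = getbe32(rec + 8);       /* compressed data length */
	*uncomp_len = getbe32(rec + 12);    /* uncompressed data length */
	*link_rev = (int)getbe32(rec + 20); /* linked changelog revision */
}
#endif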
2144 2172 static int index_init(indexObject *self, PyObject *args)
2145 2173 {
2146 2174 PyObject *data_obj, *inlined_obj;
2147 2175 Py_ssize_t size;
2148 2176
2149 2177 /* Initialize before argument-checking to avoid index_dealloc() crash.
2150 2178 */
2151 2179 self->raw_length = 0;
2152 2180 self->added = NULL;
2153 2181 self->cache = NULL;
2154 2182 self->data = NULL;
2155 2183 memset(&self->buf, 0, sizeof(self->buf));
2156 2184 self->headrevs = NULL;
2157 2185 self->filteredrevs = Py_None;
2158 2186 Py_INCREF(Py_None);
2159 2187 self->ntinitialized = 0;
2160 2188 self->offsets = NULL;
2161 2189
2162 2190 if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
2163 2191 return -1;
2164 2192 if (!PyObject_CheckBuffer(data_obj)) {
2165 2193 PyErr_SetString(PyExc_TypeError,
2166 2194 "data does not support buffer interface");
2167 2195 return -1;
2168 2196 }
2169 2197
2170 2198 if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
2171 2199 return -1;
2172 2200 size = self->buf.len;
2173 2201
2174 2202 self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
2175 2203 self->data = data_obj;
2176 2204
2177 2205 self->ntlookups = self->ntmisses = 0;
2178 2206 self->ntrev = -1;
2179 2207 Py_INCREF(self->data);
2180 2208
2181 2209 if (self->inlined) {
2182 2210 Py_ssize_t len = inline_scan(self, NULL);
2183 2211 if (len == -1)
2184 2212 goto bail;
2185 2213 self->raw_length = len;
2186 2214 self->length = len;
2187 2215 } else {
2188 2216 if (size % v1_hdrsize) {
2189 2217 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2190 2218 goto bail;
2191 2219 }
2192 2220 self->raw_length = size / v1_hdrsize;
2193 2221 self->length = self->raw_length;
2194 2222 }
2195 2223
2196 2224 return 0;
2197 2225 bail:
2198 2226 return -1;
2199 2227 }
2200 2228
2201 2229 static PyObject *index_nodemap(indexObject *self)
2202 2230 {
2203 2231 Py_INCREF(self);
2204 2232 return (PyObject *)self;
2205 2233 }
2206 2234
2207 2235 static void _index_clearcaches(indexObject *self)
2208 2236 {
2209 2237 if (self->cache) {
2210 2238 Py_ssize_t i;
2211 2239
2212 2240 for (i = 0; i < self->raw_length; i++)
2213 2241 Py_CLEAR(self->cache[i]);
2214 2242 free(self->cache);
2215 2243 self->cache = NULL;
2216 2244 }
2217 2245 if (self->offsets) {
2218 2246 PyMem_Free((void *)self->offsets);
2219 2247 self->offsets = NULL;
2220 2248 }
2221 2249 if (self->ntinitialized) {
2222 2250 nt_dealloc(&self->nt);
2223 2251 }
2224 2252 self->ntinitialized = 0;
2225 2253 Py_CLEAR(self->headrevs);
2226 2254 }
2227 2255
2228 2256 static PyObject *index_clearcaches(indexObject *self)
2229 2257 {
2230 2258 _index_clearcaches(self);
2231 2259 self->ntrev = -1;
2232 2260 self->ntlookups = self->ntmisses = 0;
2233 2261 Py_RETURN_NONE;
2234 2262 }
2235 2263
2236 2264 static void index_dealloc(indexObject *self)
2237 2265 {
2238 2266 _index_clearcaches(self);
2239 2267 Py_XDECREF(self->filteredrevs);
2240 2268 if (self->buf.buf) {
2241 2269 PyBuffer_Release(&self->buf);
2242 2270 memset(&self->buf, 0, sizeof(self->buf));
2243 2271 }
2244 2272 Py_XDECREF(self->data);
2245 2273 Py_XDECREF(self->added);
2246 2274 PyObject_Del(self);
2247 2275 }
2248 2276
2249 2277 static PySequenceMethods index_sequence_methods = {
2250 2278 (lenfunc)index_length, /* sq_length */
2251 2279 0, /* sq_concat */
2252 2280 0, /* sq_repeat */
2253 2281 (ssizeargfunc)index_get, /* sq_item */
2254 2282 0, /* sq_slice */
2255 2283 0, /* sq_ass_item */
2256 2284 0, /* sq_ass_slice */
2257 2285 (objobjproc)index_contains, /* sq_contains */
2258 2286 };
2259 2287
2260 2288 static PyMappingMethods index_mapping_methods = {
2261 2289 (lenfunc)index_length, /* mp_length */
2262 2290 (binaryfunc)index_getitem, /* mp_subscript */
2263 2291 (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
2264 2292 };
2265 2293
2266 2294 static PyMethodDef index_methods[] = {
2267 2295 {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
2268 2296 "return the gca set of the given revs"},
2269 2297 {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
2270 2298 METH_VARARGS,
2271 2299 "return the heads of the common ancestors of the given revs"},
2272 2300 {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
2273 2301 "clear the index caches"},
2274 2302 {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
2275 2303 {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
2276 2304 "compute phases"},
2277 2305 {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
2278 2306 "reachableroots"},
2279 2307 {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
2280 2308 "get head revisions"}, /* Can do filtering since 3.2 */
2281 2309 {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
2282 2310 "get filtered head revisions"}, /* Can always do filtering */
2283 2311 {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
2284 2312 "determine revisions with deltas to reconstruct fulltext"},
2285 2313 {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
2286 2314 {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
2287 2315 "match a potentially ambiguous node ID"},
2288 2316 {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
2289 2317 "find length of shortest hex nodeid of a binary ID"},
2290 2318 {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
2291 2319 {NULL} /* Sentinel */
2292 2320 };
2293 2321
2294 2322 static PyGetSetDef index_getset[] = {
2295 2323 {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
2296 2324 {NULL} /* Sentinel */
2297 2325 };
2298 2326
2299 2327 static PyTypeObject indexType = {
2300 2328 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2301 2329 "parsers.index", /* tp_name */
2302 2330 sizeof(indexObject), /* tp_basicsize */
2303 2331 0, /* tp_itemsize */
2304 2332 (destructor)index_dealloc, /* tp_dealloc */
2305 2333 0, /* tp_print */
2306 2334 0, /* tp_getattr */
2307 2335 0, /* tp_setattr */
2308 2336 0, /* tp_compare */
2309 2337 0, /* tp_repr */
2310 2338 0, /* tp_as_number */
2311 2339 &index_sequence_methods, /* tp_as_sequence */
2312 2340 &index_mapping_methods, /* tp_as_mapping */
2313 2341 0, /* tp_hash */
2314 2342 0, /* tp_call */
2315 2343 0, /* tp_str */
2316 2344 0, /* tp_getattro */
2317 2345 0, /* tp_setattro */
2318 2346 0, /* tp_as_buffer */
2319 2347 Py_TPFLAGS_DEFAULT, /* tp_flags */
2320 2348 "revlog index", /* tp_doc */
2321 2349 0, /* tp_traverse */
2322 2350 0, /* tp_clear */
2323 2351 0, /* tp_richcompare */
2324 2352 0, /* tp_weaklistoffset */
2325 2353 0, /* tp_iter */
2326 2354 0, /* tp_iternext */
2327 2355 index_methods, /* tp_methods */
2328 2356 0, /* tp_members */
2329 2357 index_getset, /* tp_getset */
2330 2358 0, /* tp_base */
2331 2359 0, /* tp_dict */
2332 2360 0, /* tp_descr_get */
2333 2361 0, /* tp_descr_set */
2334 2362 0, /* tp_dictoffset */
2335 2363 (initproc)index_init, /* tp_init */
2336 2364 0, /* tp_alloc */
2337 2365 };
2338 2366
2339 2367 /*
2340 2368 * returns a tuple of the form (index, cache) with elements as
2341 2369 * follows:
2342 2370 *
2343 2371 * index: an index object that lazily parses RevlogNG records
2344 2372 * cache: if data is inlined, a tuple (0, index_file_content), else None
2345 2373 * index_file_content could be a string, or a buffer
2346 2374 *
2347 2375 * added complications are for backwards compatibility
2348 2376 */
2349 2377 PyObject *parse_index2(PyObject *self, PyObject *args)
2350 2378 {
2351 2379 PyObject *tuple = NULL, *cache = NULL;
2352 2380 indexObject *idx;
2353 2381 int ret;
2354 2382
2355 2383 idx = PyObject_New(indexObject, &indexType);
2356 2384 if (idx == NULL)
2357 2385 goto bail;
2358 2386
2359 2387 ret = index_init(idx, args);
2360 2388 if (ret == -1)
2361 2389 goto bail;
2362 2390
2363 2391 if (idx->inlined) {
2364 2392 cache = Py_BuildValue("iO", 0, idx->data);
2365 2393 if (cache == NULL)
2366 2394 goto bail;
2367 2395 } else {
2368 2396 cache = Py_None;
2369 2397 Py_INCREF(cache);
2370 2398 }
2371 2399
2372 2400 tuple = Py_BuildValue("NN", idx, cache);
2373 2401 if (!tuple)
2374 2402 goto bail;
2375 2403 return tuple;
2376 2404
2377 2405 bail:
2378 2406 Py_XDECREF(idx);
2379 2407 Py_XDECREF(cache);
2380 2408 Py_XDECREF(tuple);
2381 2409 return NULL;
2382 2410 }
2383 2411
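/*
 * Editor's illustration (not part of the upstream file, compiled out):
 * from Python this entry point is typically used as
 * "index, cache = parse_index2(data, inline)".  A rough C-level
 * equivalent, assuming "mod" is the loaded extension module exposing the
 * function under that name and "data" supports the buffer interface:
 */
#if 0
static PyObject *example_parse_index(PyObject *mod, PyObject *data,
                                     int inline_flag)
{
	/* returns a 2-tuple: (index object, cache-or-None) */
	return PyObject_CallMethod(mod, "parse_index2", "OO", data,
	                           inline_flag ? Py_True : Py_False);
}
#endif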
2384 2412 #ifdef WITH_RUST
2385 2413
2386 2414 /* rustlazyancestors: iteration over ancestors implemented in Rust
2387 2415 *
2388 2416 * This class holds a reference to an index and to the Rust iterator.
2389 2417 */
2390 2418 typedef struct rustlazyancestorsObjectStruct rustlazyancestorsObject;
2391 2419
2392 2420 struct rustlazyancestorsObjectStruct {
2393 2421 PyObject_HEAD
2394 2422 /* Type-specific fields go here. */
2395 2423 indexObject *index; /* Ref kept to avoid GC'ing the index */
2396 2424 void *iter; /* Rust iterator */
2397 2425 };
2398 2426
2399 2427 /* FFI exposed from Rust code */
2400 2428 rustlazyancestorsObject *
2401 2429 rustlazyancestors_init(indexObject *index,
2402 2430 /* to pass index_get_parents() */
2403 2431 int (*)(indexObject *, Py_ssize_t, int *, int),
2404 2432 /* initrevs vector */
2405 2433 Py_ssize_t initrevslen, long *initrevs, long stoprev,
2406 2434 int inclusive);
2407 2435 void rustlazyancestors_drop(rustlazyancestorsObject *self);
2408 2436 int rustlazyancestors_next(rustlazyancestorsObject *self);
2409 2437 int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev);
2410 2438
2411 2439 /* CPython instance methods */
2412 2440 static int rustla_init(rustlazyancestorsObject *self, PyObject *args)
2413 2441 {
2414 2442 PyObject *initrevsarg = NULL;
2415 2443 PyObject *inclusivearg = NULL;
2416 2444 long stoprev = 0;
2417 2445 long *initrevs = NULL;
2418 2446 int inclusive = 0;
2419 2447 Py_ssize_t i;
2420 2448
2421 2449 indexObject *index;
2422 2450 if (!PyArg_ParseTuple(args, "O!O!lO!", &indexType, &index, &PyList_Type,
2423 2451 &initrevsarg, &stoprev, &PyBool_Type,
2424 2452 &inclusivearg))
2425 2453 return -1;
2426 2454
2427 2455 Py_INCREF(index);
2428 2456 self->index = index;
2429 2457
2430 2458 if (inclusivearg == Py_True)
2431 2459 inclusive = 1;
2432 2460
2433 2461 Py_ssize_t linit = PyList_GET_SIZE(initrevsarg);
2434 2462
2435 2463 initrevs = (long *)calloc(linit, sizeof(long));
2436 2464
2437 2465 if (initrevs == NULL) {
2438 2466 PyErr_NoMemory();
2439 2467 goto bail;
2440 2468 }
2441 2469
2442 2470 for (i = 0; i < linit; i++) {
2443 2471 initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i));
2444 2472 }
2445 2473 if (PyErr_Occurred())
2446 2474 goto bail;
2447 2475
2448 2476 self->iter = rustlazyancestors_init(index, index_get_parents, linit,
2449 2477 initrevs, stoprev, inclusive);
2450 2478 if (self->iter == NULL) {
2451 2479 /* if this is because of GraphError::ParentOutOfRange
2452 2480 * index_get_parents() has already set the proper ValueError */
2453 2481 goto bail;
2454 2482 }
2455 2483
2456 2484 free(initrevs);
2457 2485 return 0;
2458 2486
2459 2487 bail:
2460 2488 free(initrevs);
2461 2489 return -1;
2462 2490 }
2463 2491
2464 2492 static void rustla_dealloc(rustlazyancestorsObject *self)
2465 2493 {
2466 2494 Py_XDECREF(self->index);
2467 2495 if (self->iter != NULL) { /* can happen if rustla_init failed */
2468 2496 rustlazyancestors_drop(self->iter);
2469 2497 }
2470 2498 PyObject_Del(self);
2471 2499 }
2472 2500
2473 2501 static PyObject *rustla_next(rustlazyancestorsObject *self)
2474 2502 {
2475 2503 int res = rustlazyancestors_next(self->iter);
2476 2504 if (res == -1) {
2477 2505 /* Setting an explicit exception here appears unnecessary,
2478 2506 * as examples from the Python source (Objects/rangeobject.c
2479 2507 * and Modules/_io/stringio.c) demonstrate.
2480 2508 */
2481 2509 return NULL;
2482 2510 }
2483 2511 return PyInt_FromLong(res);
2484 2512 }
2485 2513
2486 2514 static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev)
2487 2515 {
2488 2516 long lrev;
2489 2517 if (!pylong_to_long(rev, &lrev)) {
2490 2518 PyErr_Clear();
2491 2519 return 0;
2492 2520 }
2493 2521 return rustlazyancestors_contains(self->iter, lrev);
2494 2522 }
2495 2523
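/*
 * Editor's illustration (not part of the upstream file, compiled out):
 * rustla_next() returns the next ancestor rev as a Python int and NULL
 * once the Rust iterator is exhausted, so draining it from C looks like
 * the sketch below.  "la" is a hypothetical, fully initialized
 * rustlazyancestorsObject*:
 */
#if 0
static int example_count_ancestors(rustlazyancestorsObject *la)
{
	int count = 0;
	PyObject *rev;
	while ((rev = rustla_next(la)) != NULL) {
		Py_DECREF(rev);
		count++;
	}
	if (PyErr_Occurred())
		return -1; /* aborted by an error, not by exhaustion */
	return count;
}
#endif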
2496 2524 static PySequenceMethods rustla_sequence_methods = {
2497 2525 0, /* sq_length */
2498 2526 0, /* sq_concat */
2499 2527 0, /* sq_repeat */
2500 2528 0, /* sq_item */
2501 2529 0, /* sq_slice */
2502 2530 0, /* sq_ass_item */
2503 2531 0, /* sq_ass_slice */
2504 2532 (objobjproc)rustla_contains, /* sq_contains */
2505 2533 };
2506 2534
2507 2535 static PyTypeObject rustlazyancestorsType = {
2508 2536 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2509 2537 "parsers.rustlazyancestors", /* tp_name */
2510 2538 sizeof(rustlazyancestorsObject), /* tp_basicsize */
2511 2539 0, /* tp_itemsize */
2512 2540 (destructor)rustla_dealloc, /* tp_dealloc */
2513 2541 0, /* tp_print */
2514 2542 0, /* tp_getattr */
2515 2543 0, /* tp_setattr */
2516 2544 0, /* tp_compare */
2517 2545 0, /* tp_repr */
2518 2546 0, /* tp_as_number */
2519 2547 &rustla_sequence_methods, /* tp_as_sequence */
2520 2548 0, /* tp_as_mapping */
2521 2549 0, /* tp_hash */
2522 2550 0, /* tp_call */
2523 2551 0, /* tp_str */
2524 2552 0, /* tp_getattro */
2525 2553 0, /* tp_setattro */
2526 2554 0, /* tp_as_buffer */
2527 2555 Py_TPFLAGS_DEFAULT, /* tp_flags */
2528 2556 "Iterator over ancestors, implemented in Rust", /* tp_doc */
2529 2557 0, /* tp_traverse */
2530 2558 0, /* tp_clear */
2531 2559 0, /* tp_richcompare */
2532 2560 0, /* tp_weaklistoffset */
2533 2561 0, /* tp_iter */
2534 2562 (iternextfunc)rustla_next, /* tp_iternext */
2535 2563 0, /* tp_methods */
2536 2564 0, /* tp_members */
2537 2565 0, /* tp_getset */
2538 2566 0, /* tp_base */
2539 2567 0, /* tp_dict */
2540 2568 0, /* tp_descr_get */
2541 2569 0, /* tp_descr_set */
2542 2570 0, /* tp_dictoffset */
2543 2571 (initproc)rustla_init, /* tp_init */
2544 2572 0, /* tp_alloc */
2545 2573 };
2546 2574 #endif /* WITH_RUST */
2547 2575
2548 2576 void revlog_module_init(PyObject *mod)
2549 2577 {
2550 2578 indexType.tp_new = PyType_GenericNew;
2551 2579 if (PyType_Ready(&indexType) < 0)
2552 2580 return;
2553 2581 Py_INCREF(&indexType);
2554 2582 PyModule_AddObject(mod, "index", (PyObject *)&indexType);
2555 2583
2556 2584 nodetreeType.tp_new = PyType_GenericNew;
2557 2585 if (PyType_Ready(&nodetreeType) < 0)
2558 2586 return;
2559 2587 Py_INCREF(&nodetreeType);
2560 2588 PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);
2561 2589
2562 2590 if (!nullentry) {
2563 2591 nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0,
2564 2592 0, -1, -1, -1, -1, nullid, 20);
2565 2593 }
2566 2594 if (nullentry)
2567 2595 PyObject_GC_UnTrack(nullentry);
2568 2596
2569 2597 #ifdef WITH_RUST
2570 2598 rustlazyancestorsType.tp_new = PyType_GenericNew;
2571 2599 if (PyType_Ready(&rustlazyancestorsType) < 0)
2572 2600 return;
2573 2601 Py_INCREF(&rustlazyancestorsType);
2574 2602 PyModule_AddObject(mod, "rustlazyancestors",
2575 2603 (PyObject *)&rustlazyancestorsType);
2576 2604 #endif
2577 2605 }