##// END OF EJS Templates
sparse-revlog: add a `trim_endidx` function in C...
Boris Feld -
r40742:0650be87 default
parent child Browse files
Show More
@@ -1,2605 +1,2623 b''
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #include <Python.h>
11 11 #include <assert.h>
12 12 #include <ctype.h>
13 13 #include <limits.h>
14 14 #include <stddef.h>
15 15 #include <string.h>
16 16
17 17 #include "bitmanipulation.h"
18 18 #include "charencode.h"
19 19 #include "util.h"
20 20
21 21 #ifdef IS_PY3K
22 22 /* The mapping of Python types is meant to be temporary to get Python
23 23 * 3 to compile. We should remove this once Python 3 support is fully
24 24 * supported and proper types are used in the extensions themselves. */
25 25 #define PyInt_Check PyLong_Check
26 26 #define PyInt_FromLong PyLong_FromLong
27 27 #define PyInt_FromSsize_t PyLong_FromSsize_t
28 28 #define PyInt_AsLong PyLong_AsLong
29 29 #endif
30 30
/* Forward declaration: the index type defined below, referenced by nodetree. */
typedef struct indexObjectStruct indexObject;

/* One trie node: 16 child slots, one per hex nybble of a hash. */
typedef struct {
	int children[16];
} nodetreenode;

/*
 * A base-16 trie for fast node->rev mapping.
 *
 * Positive value is index of the next node in the trie
 * Negative value is a leaf: -(rev + 2)
 * Zero is empty
 */
typedef struct {
	indexObject *index;   /* back-pointer; NOT owned (no refcount held) */
	nodetreenode *nodes;  /* heap-allocated array of trie nodes */
	unsigned length;      /* # nodes in use */
	unsigned capacity;    /* # nodes allocated */
	int depth;            /* maximum depth of tree */
	int splits;           /* # splits performed */
} nodetree;

/* Python object wrapper exposing a standalone nodetree. */
typedef struct {
	PyObject_HEAD /* ; */
	nodetree nt;
} nodetreeObject;

/*
 * This class has two behaviors.
 *
 * When used in a list-like way (with integer keys), we decode an
 * entry in a RevlogNG index file on demand. Our last entry is a
 * sentinel, always a nullid. We have limited support for
 * integer-keyed insert and delete, only at elements right before the
 * sentinel.
 *
 * With string keys, we lazily perform a reverse mapping from node to
 * rev, using a base-16 trie.
 */
struct indexObjectStruct {
	PyObject_HEAD
	/* Type-specific fields go here. */
	PyObject *data; /* raw bytes of index */
	Py_buffer buf; /* buffer of data */
	PyObject **cache; /* cached tuples */
	const char **offsets; /* populated on demand */
	Py_ssize_t raw_length; /* original number of elements */
	Py_ssize_t length; /* current number of elements */
	PyObject *added; /* populated on demand */
	PyObject *headrevs; /* cache, invalidated on changes */
	PyObject *filteredrevs; /* filtered revs set */
	nodetree nt; /* base-16 trie */
	int ntinitialized; /* 0 or 1 */
	int ntrev; /* last rev scanned */
	int ntlookups; /* # lookups */
	int ntmisses; /* # lookups that miss the cache */
	int inlined; /* nonzero if revision data is interleaved with the index */
};
89 89
90 90 static Py_ssize_t index_length(const indexObject *self)
91 91 {
92 92 if (self->added == NULL)
93 93 return self->length;
94 94 return self->length + PyList_GET_SIZE(self->added);
95 95 }
96 96
97 97 static PyObject *nullentry = NULL;
98 98 static const char nullid[20] = {0};
99 99
100 100 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
101 101
102 102 #if LONG_MAX == 0x7fffffffL
103 103 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
104 104 #else
105 105 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
106 106 #endif
107 107
108 108 /* A RevlogNG v1 index entry is 64 bytes long. */
109 109 static const long v1_hdrsize = 64;
110 110
111 111 static void raise_revlog_error(void)
112 112 {
113 113 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
114 114
115 115 mod = PyImport_ImportModule("mercurial.error");
116 116 if (mod == NULL) {
117 117 goto cleanup;
118 118 }
119 119
120 120 dict = PyModule_GetDict(mod);
121 121 if (dict == NULL) {
122 122 goto cleanup;
123 123 }
124 124 Py_INCREF(dict);
125 125
126 126 errclass = PyDict_GetItemString(dict, "RevlogError");
127 127 if (errclass == NULL) {
128 128 PyErr_SetString(PyExc_SystemError,
129 129 "could not find RevlogError");
130 130 goto cleanup;
131 131 }
132 132
133 133 /* value of exception is ignored by callers */
134 134 PyErr_SetString(errclass, "RevlogError");
135 135
136 136 cleanup:
137 137 Py_XDECREF(dict);
138 138 Py_XDECREF(mod);
139 139 }
140 140
/*
 * Return a pointer to the beginning of a RevlogNG record.
 *
 * For an inlined index the records are not evenly spaced (revision data
 * sits between them), so record positions are found by scanning the raw
 * data once and memoizing the result in self->offsets.  Returns NULL
 * with an exception set on allocation failure.
 */
static const char *index_deref(indexObject *self, Py_ssize_t pos)
{
	if (self->inlined && pos > 0) {
		/* lazily build the offset table on first access */
		if (self->offsets == NULL) {
			self->offsets = PyMem_Malloc(self->raw_length *
			                             sizeof(*self->offsets));
			if (self->offsets == NULL)
				return (const char *)PyErr_NoMemory();
			inline_scan(self, self->offsets);
		}
		return self->offsets[pos];
	}

	/* non-inlined (or pos 0): fixed-size records, direct indexing */
	return (const char *)(self->buf.buf) + pos * v1_hdrsize;
}
159 159
160 160 static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
161 161 int maxrev)
162 162 {
163 163 if (rev >= self->length) {
164 164 long tmp;
165 165 PyObject *tuple =
166 166 PyList_GET_ITEM(self->added, rev - self->length);
167 167 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 5), &tmp)) {
168 168 return -1;
169 169 }
170 170 ps[0] = (int)tmp;
171 171 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 6), &tmp)) {
172 172 return -1;
173 173 }
174 174 ps[1] = (int)tmp;
175 175 } else {
176 176 const char *data = index_deref(self, rev);
177 177 ps[0] = getbe32(data + 24);
178 178 ps[1] = getbe32(data + 28);
179 179 }
180 180 /* If index file is corrupted, ps[] may point to invalid revisions. So
181 181 * there is a risk of buffer overflow to trust them unconditionally. */
182 182 if (ps[0] > maxrev || ps[1] > maxrev) {
183 183 PyErr_SetString(PyExc_ValueError, "parent out of range");
184 184 return -1;
185 185 }
186 186 return 0;
187 187 }
188 188
/*
 * Return the byte offset in the revlog data at which `rev` starts, or
 * -1 with an exception set on error.
 *
 * The on-disk field packs offset and flags together: the low 16 bits
 * are flags, hence the final `>> 16`.
 */
static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
{
	uint64_t offset;
	if (rev >= self->length) {
		/* revision only exists in the in-memory `added` list */
		PyObject *tuple;
		PyObject *pylong;
		PY_LONG_LONG tmp;
		tuple = PyList_GET_ITEM(self->added, rev - self->length);
		pylong = PyTuple_GET_ITEM(tuple, 0);
		tmp = PyLong_AsLongLong(pylong);
		if (tmp == -1 && PyErr_Occurred()) {
			return -1;
		}
		if (tmp < 0) {
			PyErr_Format(PyExc_OverflowError,
			             "revlog entry size out of bound (%lld)",
			             (long long)tmp);
			return -1;
		}
		offset = (uint64_t)tmp;
	} else {
		const char *data = index_deref(self, rev);
		offset = getbe32(data + 4);
		if (rev == 0) {
			/* mask out version number for the first entry */
			offset &= 0xFFFF;
		} else {
			/* 48-bit offsets: high 32 bits live in word 0 */
			uint32_t offset_high = getbe32(data);
			offset |= ((uint64_t)offset_high) << 32;
		}
	}
	/* strip the 16 flag bits to get the pure byte offset */
	return (int64_t)(offset >> 16);
}
222 222
/*
 * Return the compressed data length (in bytes) of revision `rev`, or
 * -1 with an exception set on error.
 */
static inline int index_get_length(indexObject *self, Py_ssize_t rev)
{
	if (rev >= self->length) {
		/* revision only exists in the in-memory `added` list */
		PyObject *tuple;
		PyObject *pylong;
		long ret;
		tuple = PyList_GET_ITEM(self->added, rev - self->length);
		pylong = PyTuple_GET_ITEM(tuple, 1);
		ret = PyInt_AsLong(pylong);
		if (ret == -1 && PyErr_Occurred()) {
			return -1;
		}
		/* result must round-trip through the int return type */
		if (ret < 0 || ret > (long)INT_MAX) {
			PyErr_Format(PyExc_OverflowError,
			             "revlog entry size out of bound (%ld)",
			             ret);
			return -1;
		}
		return (int)ret;
	} else {
		const char *data = index_deref(self, rev);
		return (int)getbe32(data + 8);
	}
}
247 247
/*
 * RevlogNG format (all in big endian, data may be inlined):
 * 6 bytes: offset
 * 2 bytes: flags
 * 4 bytes: compressed length
 * 4 bytes: uncompressed length
 * 4 bytes: base revision
 * 4 bytes: link revision
 * 4 bytes: parent 1 revision
 * 4 bytes: parent 2 revision
 * 32 bytes: nodeid (only 20 bytes used)
 */
/*
 * Return the index entry for `pos` as a Python tuple (new reference),
 * or NULL with an exception set.  pos == -1 yields the shared nullid
 * sentinel; tuples decoded from disk data are memoized in self->cache.
 */
static PyObject *index_get(indexObject *self, Py_ssize_t pos)
{
	uint64_t offset_flags;
	int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
	const char *c_node_id;
	const char *data;
	Py_ssize_t length = index_length(self);
	PyObject *entry;

	if (pos == -1) {
		Py_INCREF(nullentry);
		return nullentry;
	}

	if (pos < 0 || pos >= length) {
		PyErr_SetString(PyExc_IndexError, "revlog index out of range");
		return NULL;
	}

	/* in-memory entries are stored as ready-made tuples */
	if (pos >= self->length) {
		PyObject *obj;
		obj = PyList_GET_ITEM(self->added, pos - self->length);
		Py_INCREF(obj);
		return obj;
	}

	/* serve from the tuple cache, lazily allocated on first use */
	if (self->cache) {
		if (self->cache[pos]) {
			Py_INCREF(self->cache[pos]);
			return self->cache[pos];
		}
	} else {
		self->cache = calloc(self->raw_length, sizeof(PyObject *));
		if (self->cache == NULL)
			return PyErr_NoMemory();
	}

	data = index_deref(self, pos);
	if (data == NULL)
		return NULL;

	offset_flags = getbe32(data + 4);
	if (pos == 0) /* mask out version number for the first entry */
		offset_flags &= 0xFFFF;
	else {
		/* 48-bit offsets: high 32 bits live in word 0 */
		uint32_t offset_high = getbe32(data);
		offset_flags |= ((uint64_t)offset_high) << 32;
	}

	comp_len = getbe32(data + 8);
	uncomp_len = getbe32(data + 12);
	base_rev = getbe32(data + 16);
	link_rev = getbe32(data + 20);
	parent_1 = getbe32(data + 24);
	parent_2 = getbe32(data + 28);
	c_node_id = data + 32;

	entry = Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
	                      base_rev, link_rev, parent_1, parent_2, c_node_id,
	                      20);

	if (entry) {
		/* the tuple holds only ints and bytes, so it cannot be in a
		 * reference cycle; untracking lightens GC traversal */
		PyObject_GC_UnTrack(entry);
		/* the extra reference is owned by self->cache */
		Py_INCREF(entry);
	}

	self->cache[pos] = entry;

	return entry;
}
330 330
/*
 * Return the 20-byte SHA of the node corresponding to the given rev.
 * Returns the shared nullid for rev -1, NULL for out-of-range or
 * inaccessible revs (no exception is set here).
 */
static const char *index_node(indexObject *self, Py_ssize_t pos)
{
	Py_ssize_t length = index_length(self);
	const char *data;

	if (pos == -1)
		return nullid;

	if (pos >= length)
		return NULL;

	/* in-memory entry: node is tuple item 7; the returned pointer
	 * stays valid while the tuple lives in self->added */
	if (pos >= self->length) {
		PyObject *tuple, *str;
		tuple = PyList_GET_ITEM(self->added, pos - self->length);
		str = PyTuple_GetItem(tuple, 7);
		return str ? PyBytes_AS_STRING(str) : NULL;
	}

	/* on-disk entry: node id starts at byte 32 of the record */
	data = index_deref(self, pos);
	return data ? data + 32 : NULL;
}
355 355
356 356 /*
357 357 * Return the 20-byte SHA of the node corresponding to the given rev. The
358 358 * rev is assumed to be existing. If not, an exception is set.
359 359 */
360 360 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
361 361 {
362 362 const char *node = index_node(self, pos);
363 363 if (node == NULL) {
364 364 PyErr_Format(PyExc_IndexError, "could not access rev %d",
365 365 (int)pos);
366 366 }
367 367 return node;
368 368 }
369 369
370 370 static int nt_insert(nodetree *self, const char *node, int rev);
371 371
372 372 static int node_check(PyObject *obj, char **node)
373 373 {
374 374 Py_ssize_t nodelen;
375 375 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
376 376 return -1;
377 377 if (nodelen == 20)
378 378 return 0;
379 379 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
380 380 return -1;
381 381 }
382 382
/*
 * Append a new 8-tuple index entry, keeping the node trie (if built)
 * in sync and invalidating the cached head list.  Returns None, or
 * NULL with an exception set.
 */
static PyObject *index_append(indexObject *self, PyObject *obj)
{
	char *node;
	Py_ssize_t len;

	if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
		PyErr_SetString(PyExc_TypeError, "8-tuple required");
		return NULL;
	}

	/* item 7 is the node id; must be a 20-byte bytes object */
	if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
		return NULL;

	/* the new entry's rev number is the current length */
	len = index_length(self);

	if (self->added == NULL) {
		self->added = PyList_New(0);
		if (self->added == NULL)
			return NULL;
	}

	if (PyList_Append(self->added, obj) == -1)
		return NULL;

	if (self->ntinitialized)
		nt_insert(&self->nt, node, (int)len);

	/* cached head list is stale once a revision is added */
	Py_CLEAR(self->headrevs);
	Py_RETURN_NONE;
}
413 413
/*
 * Return a dict of index/node-trie statistics for debugging and
 * introspection, or NULL with an exception set.
 */
static PyObject *index_stats(indexObject *self)
{
	PyObject *obj = PyDict_New();
	PyObject *s = NULL;
	PyObject *t = NULL;

	if (obj == NULL)
		return NULL;

/* insert one Py_ssize_t-valued field of `self` into the dict */
#define istat(__n, __d)                                                        \
	do {                                                                   \
		s = PyBytes_FromString(__d);                                   \
		t = PyInt_FromSsize_t(self->__n);                              \
		if (!s || !t)                                                  \
			goto bail;                                             \
		if (PyDict_SetItem(obj, s, t) == -1)                           \
			goto bail;                                             \
		Py_CLEAR(s);                                                   \
		Py_CLEAR(t);                                                   \
	} while (0)

	if (self->added) {
		Py_ssize_t len = PyList_GET_SIZE(self->added);
		s = PyBytes_FromString("index entries added");
		t = PyInt_FromSsize_t(len);
		if (!s || !t)
			goto bail;
		if (PyDict_SetItem(obj, s, t) == -1)
			goto bail;
		Py_CLEAR(s);
		Py_CLEAR(t);
	}

	if (self->raw_length != self->length)
		istat(raw_length, "revs on disk");
	istat(length, "revs in memory");
	istat(ntlookups, "node trie lookups");
	istat(ntmisses, "node trie misses");
	istat(ntrev, "node trie last rev scanned");
	if (self->ntinitialized) {
		istat(nt.capacity, "node trie capacity");
		istat(nt.depth, "node trie depth");
		istat(nt.length, "node trie count");
		istat(nt.splits, "node trie splits");
	}

#undef istat

	return obj;

bail:
	Py_XDECREF(obj);
	Py_XDECREF(s);
	Py_XDECREF(t);
	return NULL;
}
470 470
471 471 /*
472 472 * When we cache a list, we want to be sure the caller can't mutate
473 473 * the cached copy.
474 474 */
475 475 static PyObject *list_copy(PyObject *list)
476 476 {
477 477 Py_ssize_t len = PyList_GET_SIZE(list);
478 478 PyObject *newlist = PyList_New(len);
479 479 Py_ssize_t i;
480 480
481 481 if (newlist == NULL)
482 482 return NULL;
483 483
484 484 for (i = 0; i < len; i++) {
485 485 PyObject *obj = PyList_GET_ITEM(list, i);
486 486 Py_INCREF(obj);
487 487 PyList_SET_ITEM(newlist, i, obj);
488 488 }
489 489
490 490 return newlist;
491 491 }
492 492
493 493 static int check_filter(PyObject *filter, Py_ssize_t arg)
494 494 {
495 495 if (filter) {
496 496 PyObject *arglist, *result;
497 497 int isfiltered;
498 498
499 499 arglist = Py_BuildValue("(n)", arg);
500 500 if (!arglist) {
501 501 return -1;
502 502 }
503 503
504 504 result = PyEval_CallObject(filter, arglist);
505 505 Py_DECREF(arglist);
506 506 if (!result) {
507 507 return -1;
508 508 }
509 509
510 510 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
511 511 * same as this function, so we can just return it directly.*/
512 512 isfiltered = PyObject_IsTrue(result);
513 513 Py_DECREF(result);
514 514 return isfiltered;
515 515 } else {
516 516 return 0;
517 517 }
518 518 }
519 519
520 520 static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
521 521 Py_ssize_t marker, char *phases)
522 522 {
523 523 PyObject *iter = NULL;
524 524 PyObject *iter_item = NULL;
525 525 Py_ssize_t min_idx = index_length(self) + 2;
526 526 long iter_item_long;
527 527
528 528 if (PyList_GET_SIZE(list) != 0) {
529 529 iter = PyObject_GetIter(list);
530 530 if (iter == NULL)
531 531 return -2;
532 532 while ((iter_item = PyIter_Next(iter))) {
533 533 if (!pylong_to_long(iter_item, &iter_item_long)) {
534 534 Py_DECREF(iter_item);
535 535 return -2;
536 536 }
537 537 Py_DECREF(iter_item);
538 538 if (iter_item_long < min_idx)
539 539 min_idx = iter_item_long;
540 540 phases[iter_item_long] = (char)marker;
541 541 }
542 542 Py_DECREF(iter);
543 543 }
544 544
545 545 return min_idx;
546 546 }
547 547
/*
 * Raise phases[i] to the maximum of its parents' phases (a rev is at
 * least as secret as either parent); nullrev parents (< 0) are skipped.
 */
static inline void set_phase_from_parents(char *phases, int parent_1,
                                          int parent_2, Py_ssize_t i)
{
	int p;
	const int parents[2] = {parent_1, parent_2};

	for (p = 0; p < 2; p++) {
		int parent = parents[p];
		if (parent < 0)
			continue;
		if (phases[parent] > phases[i])
			phases[i] = phases[parent];
	}
}
556 556
/*
 * reachableroots2(minroot, heads, roots, includepath) -> list of revs
 *
 * Walk ancestors of `heads` (never descending below `minroot`) and
 * collect the members of `roots` that are reachable.  When
 * `includepath` is true, also return every rev that lies on a path
 * from a reachable root to a head.  Internal arrays are indexed by
 * rev + 1 so that nullrev (-1) has a slot.
 */
static PyObject *reachableroots2(indexObject *self, PyObject *args)
{

	/* Input */
	long minroot;
	PyObject *includepatharg = NULL;
	int includepath = 0;
	/* heads and roots are lists */
	PyObject *heads = NULL;
	PyObject *roots = NULL;
	PyObject *reachable = NULL;

	PyObject *val;
	Py_ssize_t len = index_length(self);
	long revnum;
	Py_ssize_t k;
	Py_ssize_t i;
	Py_ssize_t l;
	int r;
	int parents[2];

	/* Internal data structure:
	 * tovisit: array of length len+1 (all revs + nullrev), filled upto
	 * lentovisit
	 *
	 * revstates: array of length len+1 (all revs + nullrev) */
	int *tovisit = NULL;
	long lentovisit = 0;
	enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
	char *revstates = NULL;

	/* Get arguments */
	if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
	                      &PyList_Type, &roots, &PyBool_Type,
	                      &includepatharg))
		goto bail;

	if (includepatharg == Py_True)
		includepath = 1;

	/* Initialize return set */
	reachable = PyList_New(0);
	if (reachable == NULL)
		goto bail;

	/* Initialize internal datastructures */
	tovisit = (int *)malloc((len + 1) * sizeof(int));
	if (tovisit == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	revstates = (char *)calloc(len + 1, 1);
	if (revstates == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	l = PyList_GET_SIZE(roots);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		/* If root is out of range, e.g. wdir(), it must be unreachable
		 * from heads. So we can just ignore it. */
		if (revnum + 1 < 0 || revnum + 1 >= len + 1)
			continue;
		revstates[revnum + 1] |= RS_ROOT;
	}

	/* Populate tovisit with all the heads */
	l = PyList_GET_SIZE(heads);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
			PyErr_SetString(PyExc_IndexError, "head out of range");
			goto bail;
		}
		if (!(revstates[revnum + 1] & RS_SEEN)) {
			tovisit[lentovisit++] = (int)revnum;
			revstates[revnum + 1] |= RS_SEEN;
		}
	}

	/* Visit the tovisit list and find the reachable roots */
	k = 0;
	while (k < lentovisit) {
		/* Add the node to reachable if it is a root*/
		revnum = tovisit[k++];
		if (revstates[revnum + 1] & RS_ROOT) {
			revstates[revnum + 1] |= RS_REACHABLE;
			val = PyInt_FromLong(revnum);
			if (val == NULL)
				goto bail;
			r = PyList_Append(reachable, val);
			Py_DECREF(val);
			if (r < 0)
				goto bail;
			/* without includepath we need not walk past a root */
			if (includepath == 0)
				continue;
		}

		/* Add its parents to the list of nodes to visit */
		if (revnum == -1)
			continue;
		r = index_get_parents(self, revnum, parents, (int)len - 1);
		if (r < 0)
			goto bail;
		for (i = 0; i < 2; i++) {
			/* NOTE(review): assumes parents[i] >= -1; a corrupted
			 * parent below -1 would underflow revstates — confirm
			 * index_get_parents rejects such values */
			if (!(revstates[parents[i] + 1] & RS_SEEN) &&
			    parents[i] >= minroot) {
				tovisit[lentovisit++] = parents[i];
				revstates[parents[i] + 1] |= RS_SEEN;
			}
		}
	}

	/* Find all the nodes in between the roots we found and the heads
	 * and add them to the reachable set */
	if (includepath == 1) {
		long minidx = minroot;
		if (minidx < 0)
			minidx = 0;
		/* forward pass: a rev is reachable if a parent is */
		for (i = minidx; i < len; i++) {
			if (!(revstates[i + 1] & RS_SEEN))
				continue;
			r = index_get_parents(self, i, parents, (int)len - 1);
			/* Corrupted index file, error is set from
			 * index_get_parents */
			if (r < 0)
				goto bail;
			if (((revstates[parents[0] + 1] |
			      revstates[parents[1] + 1]) &
			     RS_REACHABLE) &&
			    !(revstates[i + 1] & RS_REACHABLE)) {
				revstates[i + 1] |= RS_REACHABLE;
				val = PyInt_FromSsize_t(i);
				if (val == NULL)
					goto bail;
				r = PyList_Append(reachable, val);
				Py_DECREF(val);
				if (r < 0)
					goto bail;
			}
		}
	}

	free(revstates);
	free(tovisit);
	return reachable;
bail:
	Py_XDECREF(reachable);
	free(revstates);
	free(tovisit);
	return NULL;
}
715 715
/*
 * compute_phases_map_sets(roots) -> (len, [None, set, set, ...])
 *
 * Given per-phase root lists, propagate phase information from the
 * roots down to every descendant rev and return the index length
 * together with, for each non-public phase, the set of revs in that
 * phase (slot 0 is None: public revs are computed by the caller as the
 * complement).  Returns NULL with an exception set on error.
 */
static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
{
	PyObject *roots = Py_None;
	PyObject *ret = NULL;
	PyObject *phasessize = NULL;
	PyObject *phaseroots = NULL;
	PyObject *phaseset = NULL;
	PyObject *phasessetlist = NULL;
	PyObject *rev = NULL;
	Py_ssize_t len = index_length(self);
	Py_ssize_t numphase = 0;
	Py_ssize_t minrevallphases = 0;
	Py_ssize_t minrevphase = 0;
	Py_ssize_t i = 0;
	char *phases = NULL;
	long phase;

	if (!PyArg_ParseTuple(args, "O", &roots))
		goto done;
	if (roots == NULL || !PyList_Check(roots)) {
		PyErr_SetString(PyExc_TypeError, "roots must be a list");
		goto done;
	}

	phases = calloc(
	    len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
	if (phases == NULL) {
		PyErr_NoMemory();
		goto done;
	}
	/* Put the phase information of all the roots in phases */
	numphase = PyList_GET_SIZE(roots) + 1;
	minrevallphases = len + 1;
	phasessetlist = PyList_New(numphase);
	if (phasessetlist == NULL)
		goto done;

	/* slot 0 (public) never gets a materialized set */
	PyList_SET_ITEM(phasessetlist, 0, Py_None);
	Py_INCREF(Py_None);

	for (i = 0; i < numphase - 1; i++) {
		phaseroots = PyList_GET_ITEM(roots, i);
		phaseset = PySet_New(NULL);
		if (phaseset == NULL)
			goto release;
		/* reference to phaseset is stolen by the list */
		PyList_SET_ITEM(phasessetlist, i + 1, phaseset);
		if (!PyList_Check(phaseroots)) {
			PyErr_SetString(PyExc_TypeError,
			                "roots item must be a list");
			goto release;
		}
		minrevphase =
		    add_roots_get_min(self, phaseroots, i + 1, phases);
		if (minrevphase == -2) /* Error from add_roots_get_min */
			goto release;
		minrevallphases = MIN(minrevallphases, minrevphase);
	}
	/* Propagate the phase information from the roots to the revs */
	if (minrevallphases != -1) {
		int parents[2];
		for (i = minrevallphases; i < len; i++) {
			if (index_get_parents(self, i, parents, (int)len - 1) <
			    0)
				goto release;
			set_phase_from_parents(phases, parents[0], parents[1],
			                       i);
		}
	}
	/* Transform phase list to a python list */
	phasessize = PyInt_FromSsize_t(len);
	if (phasessize == NULL)
		goto release;
	for (i = 0; i < len; i++) {
		phase = phases[i];
		/* We only store the sets of phase for non public phase, the
		 * public phase is computed as a difference */
		if (phase != 0) {
			phaseset = PyList_GET_ITEM(phasessetlist, phase);
			rev = PyInt_FromSsize_t(i);
			if (rev == NULL)
				goto release;
			PySet_Add(phaseset, rev);
			Py_XDECREF(rev);
		}
	}
	ret = PyTuple_Pack(2, phasessize, phasessetlist);

release:
	Py_XDECREF(phasessize);
	Py_XDECREF(phasessetlist);
done:
	free(phases);
	return ret;
}
810 810
/*
 * headrevs([filteredrevs]) -> list of head revs
 *
 * A head is a rev with no unfiltered children.  The result is cached
 * on the index (invalidated when entries are added or the filter set
 * changes) and a copy is returned so callers cannot mutate the cache.
 */
static PyObject *index_headrevs(indexObject *self, PyObject *args)
{
	Py_ssize_t i, j, len;
	char *nothead = NULL;
	PyObject *heads = NULL;
	PyObject *filter = NULL;
	PyObject *filteredrevs = Py_None;

	if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
		return NULL;
	}

	/* cache hit requires the very same filter object (identity check) */
	if (self->headrevs && filteredrevs == self->filteredrevs)
		return list_copy(self->headrevs);

	Py_DECREF(self->filteredrevs);
	self->filteredrevs = filteredrevs;
	Py_INCREF(filteredrevs);

	if (filteredrevs != Py_None) {
		filter = PyObject_GetAttrString(filteredrevs, "__contains__");
		if (!filter) {
			PyErr_SetString(
			    PyExc_TypeError,
			    "filteredrevs has no attribute __contains__");
			goto bail;
		}
	}

	len = index_length(self);
	heads = PyList_New(0);
	if (heads == NULL)
		goto bail;
	/* an empty repository's only head is nullrev */
	if (len == 0) {
		PyObject *nullid = PyInt_FromLong(-1);
		if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
			Py_XDECREF(nullid);
			goto bail;
		}
		goto done;
	}

	nothead = calloc(len, 1);
	if (nothead == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* walk tip->root, marking parents of every unfiltered rev */
	for (i = len - 1; i >= 0; i--) {
		int isfiltered;
		int parents[2];

		/* If nothead[i] == 1, it means we've seen an unfiltered child
		 * of this node already, and therefore this node is not
		 * filtered. So we can skip the expensive check_filter step.
		 */
		if (nothead[i] != 1) {
			isfiltered = check_filter(filter, i);
			if (isfiltered == -1) {
				PyErr_SetString(PyExc_TypeError,
				                "unable to check filter");
				goto bail;
			}

			if (isfiltered) {
				nothead[i] = 1;
				continue;
			}
		}

		if (index_get_parents(self, i, parents, (int)len - 1) < 0)
			goto bail;
		for (j = 0; j < 2; j++) {
			if (parents[j] >= 0)
				nothead[parents[j]] = 1;
		}
	}

	/* anything never marked as a parent is a head */
	for (i = 0; i < len; i++) {
		PyObject *head;

		if (nothead[i])
			continue;
		head = PyInt_FromSsize_t(i);
		if (head == NULL || PyList_Append(heads, head) == -1) {
			Py_XDECREF(head);
			goto bail;
		}
	}

done:
	/* ownership of `heads` moves into the cache; hand back a copy */
	self->headrevs = heads;
	Py_XDECREF(filter);
	free(nothead);
	return list_copy(self->headrevs);
bail:
	Py_XDECREF(filter);
	Py_XDECREF(heads);
	free(nothead);
	return NULL;
}
912 912
913 913 /**
914 914 * Obtain the base revision index entry.
915 915 *
916 916 * Callers must ensure that rev >= 0 or illegal memory access may occur.
917 917 */
918 918 static inline int index_baserev(indexObject *self, int rev)
919 919 {
920 920 const char *data;
921 921
922 922 if (rev >= self->length) {
923 923 PyObject *tuple =
924 924 PyList_GET_ITEM(self->added, rev - self->length);
925 925 long ret;
926 926 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 3), &ret)) {
927 927 return -2;
928 928 }
929 929 return (int)ret;
930 930 } else {
931 931 data = index_deref(self, rev);
932 932 if (data == NULL) {
933 933 return -2;
934 934 }
935 935
936 936 return getbe32(data + 16);
937 937 }
938 938 }
939 939
940 940 static PyObject *index_deltachain(indexObject *self, PyObject *args)
941 941 {
942 942 int rev, generaldelta;
943 943 PyObject *stoparg;
944 944 int stoprev, iterrev, baserev = -1;
945 945 int stopped;
946 946 PyObject *chain = NULL, *result = NULL;
947 947 const Py_ssize_t length = index_length(self);
948 948
949 949 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
950 950 return NULL;
951 951 }
952 952
953 953 if (PyInt_Check(stoparg)) {
954 954 stoprev = (int)PyInt_AsLong(stoparg);
955 955 if (stoprev == -1 && PyErr_Occurred()) {
956 956 return NULL;
957 957 }
958 958 } else if (stoparg == Py_None) {
959 959 stoprev = -2;
960 960 } else {
961 961 PyErr_SetString(PyExc_ValueError,
962 962 "stoprev must be integer or None");
963 963 return NULL;
964 964 }
965 965
966 966 if (rev < 0 || rev >= length) {
967 967 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
968 968 return NULL;
969 969 }
970 970
971 971 chain = PyList_New(0);
972 972 if (chain == NULL) {
973 973 return NULL;
974 974 }
975 975
976 976 baserev = index_baserev(self, rev);
977 977
978 978 /* This should never happen. */
979 979 if (baserev <= -2) {
980 980 /* Error should be set by index_deref() */
981 981 assert(PyErr_Occurred());
982 982 goto bail;
983 983 }
984 984
985 985 iterrev = rev;
986 986
987 987 while (iterrev != baserev && iterrev != stoprev) {
988 988 PyObject *value = PyInt_FromLong(iterrev);
989 989 if (value == NULL) {
990 990 goto bail;
991 991 }
992 992 if (PyList_Append(chain, value)) {
993 993 Py_DECREF(value);
994 994 goto bail;
995 995 }
996 996 Py_DECREF(value);
997 997
998 998 if (generaldelta) {
999 999 iterrev = baserev;
1000 1000 } else {
1001 1001 iterrev--;
1002 1002 }
1003 1003
1004 1004 if (iterrev < 0) {
1005 1005 break;
1006 1006 }
1007 1007
1008 1008 if (iterrev >= length) {
1009 1009 PyErr_SetString(PyExc_IndexError,
1010 1010 "revision outside index");
1011 1011 return NULL;
1012 1012 }
1013 1013
1014 1014 baserev = index_baserev(self, iterrev);
1015 1015
1016 1016 /* This should never happen. */
1017 1017 if (baserev <= -2) {
1018 1018 /* Error should be set by index_deref() */
1019 1019 assert(PyErr_Occurred());
1020 1020 goto bail;
1021 1021 }
1022 1022 }
1023 1023
1024 1024 if (iterrev == stoprev) {
1025 1025 stopped = 1;
1026 1026 } else {
1027 1027 PyObject *value = PyInt_FromLong(iterrev);
1028 1028 if (value == NULL) {
1029 1029 goto bail;
1030 1030 }
1031 1031 if (PyList_Append(chain, value)) {
1032 1032 Py_DECREF(value);
1033 1033 goto bail;
1034 1034 }
1035 1035 Py_DECREF(value);
1036 1036
1037 1037 stopped = 0;
1038 1038 }
1039 1039
1040 1040 if (PyList_Reverse(chain)) {
1041 1041 goto bail;
1042 1042 }
1043 1043
1044 1044 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
1045 1045 Py_DECREF(chain);
1046 1046 return result;
1047 1047
1048 1048 bail:
1049 1049 Py_DECREF(chain);
1050 1050 return NULL;
1051 1051 }
1052 1052
/*
 * Number of bytes spanned in the data file from the start of
 * `start_rev` through the end of `end_rev`, or -1 with an exception
 * set on error (including an inconsistent, i.e. corrupted, offset
 * ordering).
 */
static inline int64_t
index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
{
	int64_t start_offset;
	int64_t end_offset;
	int end_size;
	start_offset = index_get_start(self, start_rev);
	if (start_offset < 0) {
		return -1;
	}
	end_offset = index_get_start(self, end_rev);
	if (end_offset < 0) {
		return -1;
	}
	end_size = index_get_length(self, end_rev);
	if (end_size < 0) {
		return -1;
	}
	if (end_offset < start_offset) {
		PyErr_Format(PyExc_ValueError,
		             "corrupted revlog index: inconsistent offset "
		             "between revisions (%zd) and (%zd)",
		             start_rev, end_rev);
		return -1;
	}
	/* span = distance between starts plus the last rev's own length */
	return (end_offset - start_offset) + (int64_t)end_size;
}
1080 1080
1081 /* returns revs[startidx:endidx] without empty trailing revs */
1082 static Py_ssize_t trim_endidx(indexObject *self, const Py_ssize_t *revs,
1083 Py_ssize_t startidx, Py_ssize_t endidx)
1084 {
1085 int length;
1086 while (endidx > 1 && endidx > startidx) {
1087 length = index_get_length(self, revs[endidx - 1]);
1088 if (length < 0) {
1089 return -1;
1090 }
1091 if (length != 0) {
1092 break;
1093 }
1094 endidx -= 1;
1095 }
1096 return endidx;
1097 }
1098
1081 1099 static inline int nt_level(const char *node, Py_ssize_t level)
1082 1100 {
1083 1101 int v = node[level >> 1];
1084 1102 if (!(level & 1))
1085 1103 v >>= 4;
1086 1104 return v & 0xf;
1087 1105 }
1088 1106
/*
 * Look up a (possibly partial) binary or hex nodeid in the tree.
 *
 * Return values:
 *
 * -4: match is ambiguous (multiple candidates)
 * -2: not found
 * rest: valid rev
 */
static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
		   int hex)
{
	/* hex input yields one nibble per character; binary input yields
	 * two nibbles per byte (via nt_level) */
	int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
	int level, maxlevel, off;

	/* nullid is not stored in the tree; it maps to rev -1 */
	if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
		return -1;

	/* number of nibbles to compare, capped at 40 (a full nodeid) */
	if (hex)
		maxlevel = nodelen > 40 ? 40 : (int)nodelen;
	else
		maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);

	for (level = off = 0; level < maxlevel; level++) {
		int k = getnybble(node, level);
		nodetreenode *n = &self->nodes[off];
		int v = n->children[k];

		if (v < 0) {
			/* leaf, encoded as -(rev + 2): verify the remaining
			 * nibbles against the full stored node */
			const char *n;
			Py_ssize_t i;

			v = -(v + 2);
			n = index_node(self->index, v);
			if (n == NULL)
				return -2;
			for (i = level; i < maxlevel; i++)
				if (getnybble(node, i) != nt_level(n, i))
					return -2;
			return v;
		}
		if (v == 0)
			return -2;
		/* positive child: index of an inner node, descend */
		off = v;
	}
	/* multiple matches against an ambiguous prefix */
	return -4;
}
1135 1153
/*
 * Allocate a fresh, zeroed tree node, doubling the backing array when
 * it is full. Returns the new node's index, or -1 on error (exception
 * set). Growth may realloc self->nodes, invalidating any pointer
 * previously taken into the array (see caller in nt_insert).
 */
static int nt_new(nodetree *self)
{
	if (self->length == self->capacity) {
		unsigned newcapacity;
		nodetreenode *newnodes;
		newcapacity = self->capacity * 2;
		if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
			PyErr_SetString(PyExc_MemoryError,
					"overflow in nt_new");
			return -1;
		}
		newnodes =
		    realloc(self->nodes, newcapacity * sizeof(nodetreenode));
		if (newnodes == NULL) {
			PyErr_SetString(PyExc_MemoryError, "out of memory");
			return -1;
		}
		self->capacity = newcapacity;
		self->nodes = newnodes;
		/* zero only the newly-added tail of the array */
		memset(&self->nodes[self->length], 0,
		       sizeof(nodetreenode) * (self->capacity - self->length));
	}
	return self->length++;
}
1160 1178
/*
 * Insert `node` -> `rev` into the tree, splitting a colliding leaf
 * into an inner node as needed. rev == -2 deletes an entry (see
 * nt_delete_node). Returns 0 on success, -1 on error (exception set).
 */
static int nt_insert(nodetree *self, const char *node, int rev)
{
	int level = 0;
	int off = 0;

	while (level < 40) {
		int k = nt_level(node, level);
		nodetreenode *n;
		int v;

		n = &self->nodes[off];
		v = n->children[k];

		if (v == 0) {
			/* empty slot: store the leaf as -(rev + 2) */
			n->children[k] = -rev - 2;
			return 0;
		}
		if (v < 0) {
			/* slot already holds a leaf: either it is the same
			 * node (overwrite), or push the old leaf one level
			 * down and retry */
			const char *oldnode =
			    index_node_existing(self->index, -(v + 2));
			int noff;

			if (oldnode == NULL)
				return -1;
			if (!memcmp(oldnode, node, 20)) {
				n->children[k] = -rev - 2;
				return 0;
			}
			noff = nt_new(self);
			if (noff == -1)
				return -1;
			/* self->nodes may have been changed by realloc */
			self->nodes[off].children[k] = noff;
			off = noff;
			n = &self->nodes[off];
			n->children[nt_level(oldnode, ++level)] = v;
			if (level > self->depth)
				self->depth = level;
			self->splits += 1;
		} else {
			/* inner node: descend */
			level += 1;
			off = v;
		}
	}

	return -1;
}
1208 1226
1209 1227 static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
1210 1228 {
1211 1229 Py_ssize_t rev;
1212 1230 const char *node;
1213 1231 Py_ssize_t length;
1214 1232 if (!PyArg_ParseTuple(args, "n", &rev))
1215 1233 return NULL;
1216 1234 length = index_length(self->nt.index);
1217 1235 if (rev < 0 || rev >= length) {
1218 1236 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1219 1237 return NULL;
1220 1238 }
1221 1239 node = index_node_existing(self->nt.index, rev);
1222 1240 if (nt_insert(&self->nt, node, (int)rev) == -1)
1223 1241 return NULL;
1224 1242 Py_RETURN_NONE;
1225 1243 }
1226 1244
/*
 * Remove `node` from the tree by overwriting its leaf with rev -2.
 */
static int nt_delete_node(nodetree *self, const char *node)
{
	/* rev==-2 happens to get encoded as 0, which is interpreted as not set
	 */
	return nt_insert(self, node, -2);
}
1233 1251
/*
 * Initialize a nodetree over `index`. `capacity` is a hint in number
 * of revisions. Returns 0 on success, -1 on error (exception set).
 * Does not itself take a reference to `index`; ntobj_init INCREFs
 * before calling when the tree outlives the caller's reference.
 */
static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
{
	/* Initialize before overflow-checking to avoid nt_dealloc() crash. */
	self->nodes = NULL;

	self->index = index;
	/* The input capacity is in terms of revisions, while the field is in
	 * terms of nodetree nodes. */
	self->capacity = (capacity < 4 ? 4 : capacity / 2);
	self->depth = 0;
	self->splits = 0;
	if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
		PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
		return -1;
	}
	self->nodes = calloc(self->capacity, sizeof(nodetreenode));
	if (self->nodes == NULL) {
		PyErr_NoMemory();
		return -1;
	}
	/* node 0 is the root and always exists */
	self->length = 1;
	return 0;
}
1257 1275
1258 1276 static PyTypeObject indexType;
1259 1277
/*
 * Python constructor: nodetree(index, capacity). Takes a reference to
 * the index for the object's lifetime (released in ntobj_dealloc).
 */
static int ntobj_init(nodetreeObject *self, PyObject *args)
{
	PyObject *index;
	unsigned capacity;
	if (!PyArg_ParseTuple(args, "O!I", &indexType, &index, &capacity))
		return -1;
	Py_INCREF(index);
	return nt_init(&self->nt, (indexObject *)index, capacity);
}
1269 1287
/*
 * Resolve a hex nodeid prefix; same return values as nt_find().
 */
static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
{
	return nt_find(self, node, nodelen, 1);
}
1274 1292
/*
 * Find the length of the shortest unique prefix of node.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: length of shortest prefix
 */
static int nt_shortest(nodetree *self, const char *node)
{
	int level, off;

	for (level = off = 0; level < 40; level++) {
		int k, v;
		nodetreenode *n = &self->nodes[off];
		k = nt_level(node, level);
		v = n->children[k];
		if (v < 0) {
			/* hit a leaf: the prefix walked so far plus this
			 * nibble is unique, provided the stored node matches */
			const char *n;
			v = -(v + 2);
			n = index_node_existing(self->index, v);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, 20) != 0)
				/*
				 * Found a unique prefix, but it wasn't for the
				 * requested node (i.e the requested node does
				 * not exist).
				 */
				return -2;
			return level + 1;
		}
		if (v == 0)
			return -2;
		off = v;
	}
	/*
	 * The node was still not unique after 40 hex digits, so this won't
	 * happen. Also, if we get here, then there's a programming error in
	 * this file that made us insert a node longer than 40 hex digits.
	 */
	PyErr_SetString(PyExc_Exception, "broken node tree");
	return -3;
}
1320 1338
/*
 * Python method nodetree.shortest(node): length of the shortest unique
 * hex prefix of a binary nodeid; raises RevlogError when not found.
 */
static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
{
	PyObject *val;
	char *node;
	int length;

	if (!PyArg_ParseTuple(args, "O", &val))
		return NULL;
	if (node_check(val, &node) == -1)
		return NULL;

	length = nt_shortest(&self->nt, node);
	if (length == -3)
		return NULL;
	if (length == -2) {
		raise_revlog_error();
		return NULL;
	}
	return PyInt_FromLong(length);
}
1341 1359
/*
 * Free the tree's node array; safe to call more than once.
 */
static void nt_dealloc(nodetree *self)
{
	free(self->nodes);
	self->nodes = NULL;
}
1347 1365
/*
 * Destructor for the Python nodetree object: drop the index reference
 * taken in ntobj_init and free the tree storage.
 */
static void ntobj_dealloc(nodetreeObject *self)
{
	Py_XDECREF(self->nt.index);
	nt_dealloc(&self->nt);
	PyObject_Del(self);
}
1354 1372
/* Method table for the parsers.nodetree Python type. */
static PyMethodDef ntobj_methods[] = {
    {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
     "insert an index entry"},
    {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
     "find length of shortest hex nodeid of a binary ID"},
    {NULL} /* Sentinel */
};
1362 1380
/* Python type object for parsers.nodetree. */
static PyTypeObject nodetreeType = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.nodetree",            /* tp_name */
    sizeof(nodetreeObject),        /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)ntobj_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    0,                             /* tp_as_sequence */
    0,                             /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "nodetree",                    /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    ntobj_methods,                 /* tp_methods */
    0,                             /* tp_members */
    0,                             /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)ntobj_init,          /* tp_init */
    0,                             /* tp_alloc */
};
1402 1420
/*
 * Lazily create and seed the index's embedded nodetree. Returns 0 on
 * success, -1 on error (exception set).
 */
static int index_init_nt(indexObject *self)
{
	if (!self->ntinitialized) {
		if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
			nt_dealloc(&self->nt);
			return -1;
		}
		/* nullid maps to rev -1 */
		if (nt_insert(&self->nt, nullid, -1) == -1) {
			nt_dealloc(&self->nt);
			return -1;
		}
		self->ntinitialized = 1;
		/* revs >= ntrev have not been inserted into the tree yet */
		self->ntrev = (int)index_length(self);
		self->ntlookups = 1;
		self->ntmisses = 0;
	}
	return 0;
}
1421 1439
/*
 * Find the rev for a binary nodeid (or prefix of up to 20 bytes).
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: valid rev
 */
static int index_find_node(indexObject *self, const char *node,
			   Py_ssize_t nodelen)
{
	int rev;

	if (index_init_nt(self) == -1)
		return -3;

	self->ntlookups++;
	rev = nt_find(&self->nt, node, nodelen, 0);
	if (rev >= -1)
		return rev;

	/*
	 * For the first handful of lookups, we scan the entire index,
	 * and cache only the matching nodes. This optimizes for cases
	 * like "hg tip", where only a few nodes are accessed.
	 *
	 * After that, we cache every node we visit, using a single
	 * scan amortized over multiple lookups. This gives the best
	 * bulk performance, e.g. for "hg log".
	 */
	if (self->ntmisses++ < 4) {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				if (nt_insert(&self->nt, n, rev) == -1)
					return -3;
				break;
			}
		}
	} else {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			/* cache every node visited during the scan */
			if (nt_insert(&self->nt, n, rev) == -1) {
				self->ntrev = rev + 1;
				return -3;
			}
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				break;
			}
		}
		self->ntrev = rev;
	}

	if (rev >= 0)
		return rev;
	return -2;
}
1482 1500
/*
 * index[key]: an integer key returns the index entry tuple for that
 * rev; a 20-byte node key returns its rev, raising RevlogError when
 * the node is unknown.
 */
static PyObject *index_getitem(indexObject *self, PyObject *value)
{
	char *node;
	int rev;

	if (PyInt_Check(value)) {
		long idx;
		if (!pylong_to_long(value, &idx)) {
			return NULL;
		}
		return index_get(self, idx);
	}

	if (node_check(value, &node) == -1)
		return NULL;
	rev = index_find_node(self, node, 20);
	if (rev >= -1)
		return PyInt_FromLong(rev);
	if (rev == -2)
		raise_revlog_error();
	return NULL;
}
1505 1523
/*
 * Fully populate the radix tree.
 */
static int index_populate_nt(indexObject *self)
{
	int rev;
	/* ntrev is the lowest rev not yet inserted; -1 means complete */
	if (self->ntrev > 0) {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -1;
			if (nt_insert(&self->nt, n, rev) == -1)
				return -1;
		}
		self->ntrev = -1;
	}
	return 0;
}
1524 1542
/*
 * Python method: resolve a hex nodeid prefix to the full 20-byte
 * binary node. Returns None when there is no match; raises
 * RevlogError on an ambiguous prefix.
 */
static PyObject *index_partialmatch(indexObject *self, PyObject *args)
{
	const char *fullnode;
	int nodelen;
	char *node;
	int rev, i;

	/* NOTE(review): "s#"/"y#" fill an int here; under PY_SSIZE_T_CLEAN
	 * these format units require Py_ssize_t — confirm build flags. */
	if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
		return NULL;

	if (nodelen < 1) {
		PyErr_SetString(PyExc_ValueError, "key too short");
		return NULL;
	}

	if (nodelen > 40) {
		PyErr_SetString(PyExc_ValueError, "key too long");
		return NULL;
	}

	for (i = 0; i < nodelen; i++)
		hexdigit(node, i);
	if (PyErr_Occurred()) {
		/* input contains non-hex characters */
		PyErr_Clear();
		Py_RETURN_NONE;
	}

	if (index_init_nt(self) == -1)
		return NULL;
	if (index_populate_nt(self) == -1)
		return NULL;
	rev = nt_partialmatch(&self->nt, node, nodelen);

	switch (rev) {
	case -4:
		raise_revlog_error();
		return NULL;
	case -2:
		Py_RETURN_NONE;
	case -1:
		return PyBytes_FromStringAndSize(nullid, 20);
	}

	fullnode = index_node_existing(self, rev);
	if (fullnode == NULL) {
		return NULL;
	}
	return PyBytes_FromStringAndSize(fullnode, 20);
}
1575 1593
/*
 * Python method: length of the shortest unique hex prefix of a binary
 * node, computed against the fully-populated tree.
 */
static PyObject *index_shortest(indexObject *self, PyObject *args)
{
	PyObject *val;
	char *node;
	int length;

	if (!PyArg_ParseTuple(args, "O", &val))
		return NULL;
	if (node_check(val, &node) == -1)
		return NULL;

	self->ntlookups++;
	if (index_init_nt(self) == -1)
		return NULL;
	if (index_populate_nt(self) == -1)
		return NULL;
	length = nt_shortest(&self->nt, node);
	if (length == -3)
		return NULL;
	if (length == -2) {
		raise_revlog_error();
		return NULL;
	}
	return PyInt_FromLong(length);
}
1601 1619
/*
 * index.get(node): the rev of a 20-byte node, or None when absent.
 */
static PyObject *index_m_get(indexObject *self, PyObject *args)
{
	PyObject *val;
	char *node;
	int rev;

	if (!PyArg_ParseTuple(args, "O", &val))
		return NULL;
	if (node_check(val, &node) == -1)
		return NULL;
	rev = index_find_node(self, node, 20);
	if (rev == -3)
		return NULL;
	if (rev == -2)
		Py_RETURN_NONE;
	return PyInt_FromLong(rev);
}
1619 1637
/*
 * `in` support: an integer is checked against the range [-1, len); a
 * 20-byte node is looked up in the tree. Returns 1/0, or -1 on error.
 */
static int index_contains(indexObject *self, PyObject *value)
{
	char *node;

	if (PyInt_Check(value)) {
		long rev;
		if (!pylong_to_long(value, &rev)) {
			return -1;
		}
		return rev >= -1 && rev < index_length(self);
	}

	if (node_check(value, &node) == -1)
		return -1;

	switch (index_find_node(self, node, 20)) {
	case -3:
		return -1;
	case -2:
		return 0;
	default:
		return 1;
	}
}
1644 1662
1645 1663 typedef uint64_t bitmask;
1646 1664
1647 1665 /*
1648 1666 * Given a disjoint set of revs, return all candidates for the
1649 1667 * greatest common ancestor. In revset notation, this is the set
1650 1668 * "heads(::a and ::b and ...)"
1651 1669 */
1652 1670 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
1653 1671 int revcount)
1654 1672 {
1655 1673 const bitmask allseen = (1ull << revcount) - 1;
1656 1674 const bitmask poison = 1ull << revcount;
1657 1675 PyObject *gca = PyList_New(0);
1658 1676 int i, v, interesting;
1659 1677 int maxrev = -1;
1660 1678 bitmask sp;
1661 1679 bitmask *seen;
1662 1680
1663 1681 if (gca == NULL)
1664 1682 return PyErr_NoMemory();
1665 1683
1666 1684 for (i = 0; i < revcount; i++) {
1667 1685 if (revs[i] > maxrev)
1668 1686 maxrev = revs[i];
1669 1687 }
1670 1688
1671 1689 seen = calloc(sizeof(*seen), maxrev + 1);
1672 1690 if (seen == NULL) {
1673 1691 Py_DECREF(gca);
1674 1692 return PyErr_NoMemory();
1675 1693 }
1676 1694
1677 1695 for (i = 0; i < revcount; i++)
1678 1696 seen[revs[i]] = 1ull << i;
1679 1697
1680 1698 interesting = revcount;
1681 1699
1682 1700 for (v = maxrev; v >= 0 && interesting; v--) {
1683 1701 bitmask sv = seen[v];
1684 1702 int parents[2];
1685 1703
1686 1704 if (!sv)
1687 1705 continue;
1688 1706
1689 1707 if (sv < poison) {
1690 1708 interesting -= 1;
1691 1709 if (sv == allseen) {
1692 1710 PyObject *obj = PyInt_FromLong(v);
1693 1711 if (obj == NULL)
1694 1712 goto bail;
1695 1713 if (PyList_Append(gca, obj) == -1) {
1696 1714 Py_DECREF(obj);
1697 1715 goto bail;
1698 1716 }
1699 1717 sv |= poison;
1700 1718 for (i = 0; i < revcount; i++) {
1701 1719 if (revs[i] == v)
1702 1720 goto done;
1703 1721 }
1704 1722 }
1705 1723 }
1706 1724 if (index_get_parents(self, v, parents, maxrev) < 0)
1707 1725 goto bail;
1708 1726
1709 1727 for (i = 0; i < 2; i++) {
1710 1728 int p = parents[i];
1711 1729 if (p == -1)
1712 1730 continue;
1713 1731 sp = seen[p];
1714 1732 if (sv < poison) {
1715 1733 if (sp == 0) {
1716 1734 seen[p] = sv;
1717 1735 interesting++;
1718 1736 } else if (sp != sv)
1719 1737 seen[p] |= sv;
1720 1738 } else {
1721 1739 if (sp && sp < poison)
1722 1740 interesting--;
1723 1741 seen[p] = sv;
1724 1742 }
1725 1743 }
1726 1744 }
1727 1745
1728 1746 done:
1729 1747 free(seen);
1730 1748 return gca;
1731 1749 bail:
1732 1750 free(seen);
1733 1751 Py_XDECREF(gca);
1734 1752 return NULL;
1735 1753 }
1736 1754
1737 1755 /*
1738 1756 * Given a disjoint set of revs, return the subset with the longest
1739 1757 * path to the root.
1740 1758 */
1741 1759 static PyObject *find_deepest(indexObject *self, PyObject *revs)
1742 1760 {
1743 1761 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
1744 1762 static const Py_ssize_t capacity = 24;
1745 1763 int *depth, *interesting = NULL;
1746 1764 int i, j, v, ninteresting;
1747 1765 PyObject *dict = NULL, *keys = NULL;
1748 1766 long *seen = NULL;
1749 1767 int maxrev = -1;
1750 1768 long final;
1751 1769
1752 1770 if (revcount > capacity) {
1753 1771 PyErr_Format(PyExc_OverflowError,
1754 1772 "bitset size (%ld) > capacity (%ld)",
1755 1773 (long)revcount, (long)capacity);
1756 1774 return NULL;
1757 1775 }
1758 1776
1759 1777 for (i = 0; i < revcount; i++) {
1760 1778 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
1761 1779 if (n > maxrev)
1762 1780 maxrev = n;
1763 1781 }
1764 1782
1765 1783 depth = calloc(sizeof(*depth), maxrev + 1);
1766 1784 if (depth == NULL)
1767 1785 return PyErr_NoMemory();
1768 1786
1769 1787 seen = calloc(sizeof(*seen), maxrev + 1);
1770 1788 if (seen == NULL) {
1771 1789 PyErr_NoMemory();
1772 1790 goto bail;
1773 1791 }
1774 1792
1775 1793 interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
1776 1794 if (interesting == NULL) {
1777 1795 PyErr_NoMemory();
1778 1796 goto bail;
1779 1797 }
1780 1798
1781 1799 if (PyList_Sort(revs) == -1)
1782 1800 goto bail;
1783 1801
1784 1802 for (i = 0; i < revcount; i++) {
1785 1803 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
1786 1804 long b = 1l << i;
1787 1805 depth[n] = 1;
1788 1806 seen[n] = b;
1789 1807 interesting[b] = 1;
1790 1808 }
1791 1809
1792 1810 /* invariant: ninteresting is the number of non-zero entries in
1793 1811 * interesting. */
1794 1812 ninteresting = (int)revcount;
1795 1813
1796 1814 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
1797 1815 int dv = depth[v];
1798 1816 int parents[2];
1799 1817 long sv;
1800 1818
1801 1819 if (dv == 0)
1802 1820 continue;
1803 1821
1804 1822 sv = seen[v];
1805 1823 if (index_get_parents(self, v, parents, maxrev) < 0)
1806 1824 goto bail;
1807 1825
1808 1826 for (i = 0; i < 2; i++) {
1809 1827 int p = parents[i];
1810 1828 long sp;
1811 1829 int dp;
1812 1830
1813 1831 if (p == -1)
1814 1832 continue;
1815 1833
1816 1834 dp = depth[p];
1817 1835 sp = seen[p];
1818 1836 if (dp <= dv) {
1819 1837 depth[p] = dv + 1;
1820 1838 if (sp != sv) {
1821 1839 interesting[sv] += 1;
1822 1840 seen[p] = sv;
1823 1841 if (sp) {
1824 1842 interesting[sp] -= 1;
1825 1843 if (interesting[sp] == 0)
1826 1844 ninteresting -= 1;
1827 1845 }
1828 1846 }
1829 1847 } else if (dv == dp - 1) {
1830 1848 long nsp = sp | sv;
1831 1849 if (nsp == sp)
1832 1850 continue;
1833 1851 seen[p] = nsp;
1834 1852 interesting[sp] -= 1;
1835 1853 if (interesting[sp] == 0)
1836 1854 ninteresting -= 1;
1837 1855 if (interesting[nsp] == 0)
1838 1856 ninteresting += 1;
1839 1857 interesting[nsp] += 1;
1840 1858 }
1841 1859 }
1842 1860 interesting[sv] -= 1;
1843 1861 if (interesting[sv] == 0)
1844 1862 ninteresting -= 1;
1845 1863 }
1846 1864
1847 1865 final = 0;
1848 1866 j = ninteresting;
1849 1867 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
1850 1868 if (interesting[i] == 0)
1851 1869 continue;
1852 1870 final |= i;
1853 1871 j -= 1;
1854 1872 }
1855 1873 if (final == 0) {
1856 1874 keys = PyList_New(0);
1857 1875 goto bail;
1858 1876 }
1859 1877
1860 1878 dict = PyDict_New();
1861 1879 if (dict == NULL)
1862 1880 goto bail;
1863 1881
1864 1882 for (i = 0; i < revcount; i++) {
1865 1883 PyObject *key;
1866 1884
1867 1885 if ((final & (1 << i)) == 0)
1868 1886 continue;
1869 1887
1870 1888 key = PyList_GET_ITEM(revs, i);
1871 1889 Py_INCREF(key);
1872 1890 Py_INCREF(Py_None);
1873 1891 if (PyDict_SetItem(dict, key, Py_None) == -1) {
1874 1892 Py_DECREF(key);
1875 1893 Py_DECREF(Py_None);
1876 1894 goto bail;
1877 1895 }
1878 1896 }
1879 1897
1880 1898 keys = PyDict_Keys(dict);
1881 1899
1882 1900 bail:
1883 1901 free(depth);
1884 1902 free(seen);
1885 1903 free(interesting);
1886 1904 Py_XDECREF(dict);
1887 1905
1888 1906 return keys;
1889 1907 }
1890 1908
/*
 * Given a (possibly overlapping) set of revs, return all the
 * common ancestors heads: heads(::args[0] and ::a[1] and ...)
 */
static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
{
	PyObject *ret = NULL;
	Py_ssize_t argcount, i, len;
	bitmask repeat = 0;
	int revcount = 0;
	int *revs;

	argcount = PySequence_Length(args);
	revs = PyMem_Malloc(argcount * sizeof(*revs));
	if (argcount > 0 && revs == NULL)
		return PyErr_NoMemory();
	len = index_length(self);

	for (i = 0; i < argcount; i++) {
		static const int capacity = 24;
		PyObject *obj = PySequence_GetItem(args, i);
		bitmask x;
		long val;

		if (!PyInt_Check(obj)) {
			PyErr_SetString(PyExc_TypeError,
					"arguments must all be ints");
			Py_DECREF(obj);
			goto bail;
		}
		val = PyInt_AsLong(obj);
		Py_DECREF(obj);
		/* nullrev short-circuits to an empty answer */
		if (val == -1) {
			ret = PyList_New(0);
			goto done;
		}
		if (val < 0 || val >= len) {
			PyErr_SetString(PyExc_IndexError, "index out of range");
			goto bail;
		}
		/* this cheesy bloom filter lets us avoid some more
		 * expensive duplicate checks in the common set-is-disjoint
		 * case */
		x = 1ull << (val & 0x3f);
		if (repeat & x) {
			int k;
			for (k = 0; k < revcount; k++) {
				if (val == revs[k])
					goto duplicate;
			}
		} else
			repeat |= x;
		if (revcount >= capacity) {
			PyErr_Format(PyExc_OverflowError,
				     "bitset size (%d) > capacity (%d)",
				     revcount, capacity);
			goto bail;
		}
		revs[revcount++] = (int)val;
	duplicate:;
	}

	if (revcount == 0) {
		ret = PyList_New(0);
		goto done;
	}
	if (revcount == 1) {
		/* a single rev is trivially its own answer */
		PyObject *obj;
		ret = PyList_New(1);
		if (ret == NULL)
			goto bail;
		obj = PyInt_FromLong(revs[0]);
		if (obj == NULL)
			goto bail;
		PyList_SET_ITEM(ret, 0, obj);
		goto done;
	}

	ret = find_gca_candidates(self, revs, revcount);
	if (ret == NULL)
		goto bail;

done:
	PyMem_Free(revs);
	return ret;

bail:
	PyMem_Free(revs);
	Py_XDECREF(ret);
	return NULL;
}
1982 2000
/*
 * Given a (possibly overlapping) set of revs, return the greatest
 * common ancestors: those with the longest path to the root.
 */
static PyObject *index_ancestors(indexObject *self, PyObject *args)
{
	PyObject *ret;
	PyObject *gca = index_commonancestorsheads(self, args);
	if (gca == NULL)
		return NULL;

	/* 0 or 1 candidates: already the final answer */
	if (PyList_GET_SIZE(gca) <= 1) {
		return gca;
	}

	ret = find_deepest(self, gca);
	Py_DECREF(gca);
	return ret;
}
2002 2020
/*
 * Invalidate any trie entries introduced by added revs.
 */
static void index_invalidate_added(indexObject *self, Py_ssize_t start)
{
	Py_ssize_t i, len = PyList_GET_SIZE(self->added);

	for (i = start; i < len; i++) {
		PyObject *tuple = PyList_GET_ITEM(self->added, i);
		/* tuple[7] is the binary node of the added revision */
		PyObject *node = PyTuple_GET_ITEM(tuple, 7);

		nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
	}

	/* dropping every added rev: release the list itself */
	if (start == 0)
		Py_CLEAR(self->added);
}
2020 2038
/*
 * Delete a numeric range of revs, which must be at the end of the
 * range, but exclude the sentinel nullid entry.
 */
static int index_slice_del(indexObject *self, PyObject *item)
{
	Py_ssize_t start, stop, step, slicelength;
	Py_ssize_t length = index_length(self) + 1;
	int ret = 0;

	/* Argument changed from PySliceObject* to PyObject* in Python 3. */
#ifdef IS_PY3K
	if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
				 &slicelength) < 0)
#else
	if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
				 &step, &slicelength) < 0)
#endif
		return -1;

	if (slicelength <= 0)
		return 0;

	if ((step < 0 && start < stop) || (step > 0 && start > stop))
		stop = start;

	/* normalize a negative step to a forward range */
	if (step < 0) {
		stop = start + 1;
		start = stop + step * (slicelength - 1) - 1;
		step = -step;
	}

	if (step != 1) {
		PyErr_SetString(PyExc_ValueError,
				"revlog index delete requires step size of 1");
		return -1;
	}

	if (stop != length - 1) {
		PyErr_SetString(PyExc_IndexError,
				"revlog index deletion indices are invalid");
		return -1;
	}

	if (start < self->length) {
		/* deletion reaches into the on-disk portion of the index */
		if (self->ntinitialized) {
			Py_ssize_t i;

			for (i = start + 1; i < self->length; i++) {
				const char *node = index_node_existing(self, i);
				if (node == NULL)
					return -1;

				nt_delete_node(&self->nt, node);
			}
			if (self->added)
				index_invalidate_added(self, 0);
			if (self->ntrev > start)
				self->ntrev = (int)start;
		}
		self->length = start;
		if (start < self->raw_length) {
			/* drop cached entry tuples for the removed revs */
			if (self->cache) {
				Py_ssize_t i;
				for (i = start; i < self->raw_length; i++)
					Py_CLEAR(self->cache[i]);
			}
			self->raw_length = start;
		}
		goto done;
	}

	/* deletion only touches in-memory (added) revisions */
	if (self->ntinitialized) {
		index_invalidate_added(self, start - self->length);
		if (self->ntrev > start)
			self->ntrev = (int)start;
	}
	if (self->added)
		ret = PyList_SetSlice(self->added, start - self->length,
				      PyList_GET_SIZE(self->added), NULL);
done:
	Py_CLEAR(self->headrevs);
	return ret;
}
2105 2123
/*
 * Supported ops:
 *
 * slice deletion
 * string assignment (extend node->rev mapping)
 * string deletion (shrink node->rev mapping)
 */
static int index_assign_subscript(indexObject *self, PyObject *item,
				  PyObject *value)
{
	char *node;
	long rev;

	if (PySlice_Check(item) && value == NULL)
		return index_slice_del(self, item);

	if (node_check(item, &node) == -1)
		return -1;

	/* node deletion is a no-op when the tree was never built */
	if (value == NULL)
		return self->ntinitialized ? nt_delete_node(&self->nt, node)
					   : 0;
	rev = PyInt_AsLong(value);
	if (rev > INT_MAX || rev < 0) {
		if (!PyErr_Occurred())
			PyErr_SetString(PyExc_ValueError, "rev out of range");
		return -1;
	}

	if (index_init_nt(self) == -1)
		return -1;
	return nt_insert(&self->nt, node, (int)rev);
}
2139 2157
/*
 * Find all RevlogNG entries in an index that has inline data. Update
 * the optional "offsets" table with those entries.
 * Returns the number of entries found, or -1 on corruption (exception
 * set).
 */
static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
{
	const char *data = (const char *)self->buf.buf;
	Py_ssize_t pos = 0;
	Py_ssize_t end = self->buf.len;
	long incr = v1_hdrsize;
	Py_ssize_t len = 0;

	while (pos + v1_hdrsize <= end && pos >= 0) {
		uint32_t comp_len;
		/* 3rd element of header is length of compressed inline data */
		comp_len = getbe32(data + pos + 8);
		incr = v1_hdrsize + comp_len;
		if (offsets)
			offsets[len] = data + pos;
		len++;
		pos += incr;
	}

	/* a partial trailing record means the file is corrupt */
	if (pos != end) {
		if (!PyErr_Occurred())
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
		return -1;
	}

	return len;
}
2171 2189
/*
 * index.__init__(data, inlined): bind the index object to a buffer of
 * RevlogNG records.
 *
 * "data" must support the buffer protocol; "inlined" is a truthy flag
 * saying whether revision data is interleaved with the index records
 * (in which case the records must be scanned to count them, since they
 * are no longer a fixed v1_hdrsize apart).
 *
 * Returns 0 on success, -1 with an exception set on failure.
 */
static int index_init(indexObject *self, PyObject *args)
{
	PyObject *data_obj, *inlined_obj;
	Py_ssize_t size;

	/* Initialize before argument-checking to avoid index_dealloc() crash.
	 */
	self->raw_length = 0;
	self->added = NULL;
	self->cache = NULL;
	self->data = NULL;
	memset(&self->buf, 0, sizeof(self->buf));
	self->headrevs = NULL;
	self->filteredrevs = Py_None;
	Py_INCREF(Py_None);
	self->ntinitialized = 0;
	self->offsets = NULL;

	if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
		return -1;
	if (!PyObject_CheckBuffer(data_obj)) {
		PyErr_SetString(PyExc_TypeError,
		                "data does not support buffer interface");
		return -1;
	}

	/* Borrow a view of the raw bytes; released in index_dealloc(). */
	if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
		return -1;
	size = self->buf.len;

	self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
	self->data = data_obj;

	self->ntlookups = self->ntmisses = 0;
	self->ntrev = -1;
	/* Keep the data object alive as long as we hold its buffer. */
	Py_INCREF(self->data);

	if (self->inlined) {
		/* Inline data: records are variable-length, count them. */
		Py_ssize_t len = inline_scan(self, NULL);
		if (len == -1)
			goto bail;
		self->raw_length = len;
		self->length = len;
	} else {
		/* Non-inline: fixed-size records; size must divide evenly. */
		if (size % v1_hdrsize) {
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
			goto bail;
		}
		self->raw_length = size / v1_hdrsize;
		self->length = self->raw_length;
	}

	return 0;
bail:
	return -1;
}
2228 2246
2229 2247 static PyObject *index_nodemap(indexObject *self)
2230 2248 {
2231 2249 Py_INCREF(self);
2232 2250 return (PyObject *)self;
2233 2251 }
2234 2252
/*
 * Free every derived/cached structure hanging off the index: the
 * per-rev tuple cache, the inline-offsets table, the nodetree and the
 * cached head revisions.  Safe to call repeatedly; used both from
 * index.clearcaches() and from index_dealloc().
 */
static void _index_clearcaches(indexObject *self)
{
	if (self->cache) {
		Py_ssize_t i;

		/* Drop the cached entry tuples before freeing the array. */
		for (i = 0; i < self->raw_length; i++)
			Py_CLEAR(self->cache[i]);
		free(self->cache);
		self->cache = NULL;
	}
	if (self->offsets) {
		/* Allocated with PyMem_Malloc by the inline-offset builder. */
		PyMem_Free((void *)self->offsets);
		self->offsets = NULL;
	}
	if (self->ntinitialized) {
		nt_dealloc(&self->nt);
	}
	self->ntinitialized = 0;
	Py_CLEAR(self->headrevs);
}
2255 2273
2256 2274 static PyObject *index_clearcaches(indexObject *self)
2257 2275 {
2258 2276 _index_clearcaches(self);
2259 2277 self->ntrev = -1;
2260 2278 self->ntlookups = self->ntmisses = 0;
2261 2279 Py_RETURN_NONE;
2262 2280 }
2263 2281
/*
 * tp_dealloc: release caches, the buffer view and owned references.
 * Must tolerate a partially-initialized object, which is why
 * index_init() NULLs every field before doing anything that can fail.
 */
static void index_dealloc(indexObject *self)
{
	_index_clearcaches(self);
	Py_XDECREF(self->filteredrevs);
	if (self->buf.buf) {
		/* Release the buffer view acquired in index_init(). */
		PyBuffer_Release(&self->buf);
		memset(&self->buf, 0, sizeof(self->buf));
	}
	Py_XDECREF(self->data);
	Py_XDECREF(self->added);
	PyObject_Del(self);
}
2276 2294
/* Sequence protocol: len(index), index[rev] and "node in index". */
static PySequenceMethods index_sequence_methods = {
    (lenfunc)index_length,      /* sq_length */
    0,                          /* sq_concat */
    0,                          /* sq_repeat */
    (ssizeargfunc)index_get,    /* sq_item */
    0,                          /* sq_slice */
    0,                          /* sq_ass_item */
    0,                          /* sq_ass_slice */
    (objobjproc)index_contains, /* sq_contains */
};
2287 2305
/* Mapping protocol: index[node] lookups plus the assignment/deletion
 * ops documented above index_assign_subscript(). */
static PyMappingMethods index_mapping_methods = {
    (lenfunc)index_length,                 /* mp_length */
    (binaryfunc)index_getitem,             /* mp_subscript */
    (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
};
2293 2311
/* Methods exposed on parsers.index objects. */
static PyMethodDef index_methods[] = {
    {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
     "return the gca set of the given revs"},
    {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
     METH_VARARGS,
     "return the heads of the common ancestors of the given revs"},
    {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
     "clear the index caches"},
    {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
    {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
     "compute phases"},
    {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
     "reachableroots"},
    {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
     "get head revisions"}, /* Can do filtering since 3.2 */
    {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
     "get filtered head revisions"}, /* Can always do filtering */
    {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
     "determine revisions with deltas to reconstruct fulltext"},
    {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
    {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
     "match a potentially ambiguous node ID"},
    {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
     "find length of shortest hex nodeid of a binary ID"},
    {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
    {NULL} /* Sentinel */
};
2321 2339
/* Attribute getters; "nodemap" returns the index itself (see
 * index_nodemap()). */
static PyGetSetDef index_getset[] = {
    {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
    {NULL} /* Sentinel */
};
2326 2344
/* Type object for parsers.index. */
static PyTypeObject indexType = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.index",               /* tp_name */
    sizeof(indexObject),           /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)index_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    &index_sequence_methods,       /* tp_as_sequence */
    &index_mapping_methods,        /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "revlog index",                /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    index_methods,                 /* tp_methods */
    0,                             /* tp_members */
    index_getset,                  /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)index_init,          /* tp_init */
    0,                             /* tp_alloc */
};
2366 2384
2367 2385 /*
2368 2386 * returns a tuple of the form (index, index, cache) with elements as
2369 2387 * follows:
2370 2388 *
2371 2389 * index: an index object that lazily parses RevlogNG records
2372 2390 * cache: if data is inlined, a tuple (0, index_file_content), else None
2373 2391 * index_file_content could be a string, or a buffer
2374 2392 *
2375 2393 * added complications are for backwards compatibility
2376 2394 */
2377 2395 PyObject *parse_index2(PyObject *self, PyObject *args)
2378 2396 {
2379 2397 PyObject *tuple = NULL, *cache = NULL;
2380 2398 indexObject *idx;
2381 2399 int ret;
2382 2400
2383 2401 idx = PyObject_New(indexObject, &indexType);
2384 2402 if (idx == NULL)
2385 2403 goto bail;
2386 2404
2387 2405 ret = index_init(idx, args);
2388 2406 if (ret == -1)
2389 2407 goto bail;
2390 2408
2391 2409 if (idx->inlined) {
2392 2410 cache = Py_BuildValue("iO", 0, idx->data);
2393 2411 if (cache == NULL)
2394 2412 goto bail;
2395 2413 } else {
2396 2414 cache = Py_None;
2397 2415 Py_INCREF(cache);
2398 2416 }
2399 2417
2400 2418 tuple = Py_BuildValue("NN", idx, cache);
2401 2419 if (!tuple)
2402 2420 goto bail;
2403 2421 return tuple;
2404 2422
2405 2423 bail:
2406 2424 Py_XDECREF(idx);
2407 2425 Py_XDECREF(cache);
2408 2426 Py_XDECREF(tuple);
2409 2427 return NULL;
2410 2428 }
2411 2429
2412 2430 #ifdef WITH_RUST
2413 2431
/* rustlazyancestors: iteration over ancestors implemented in Rust
 *
 * This class holds a reference to an index and to the Rust iterator.
 */
typedef struct rustlazyancestorsObjectStruct rustlazyancestorsObject;

struct rustlazyancestorsObjectStruct {
	PyObject_HEAD
	    /* Type-specific fields go here. */
	    indexObject *index; /* Ref kept to avoid GC'ing the index */
	void *iter;             /* Rust iterator */
};

/* FFI exposed from Rust code.
 *
 * rustlazyancestors_init returns NULL on error; when the cause is a
 * parent-out-of-range, the passed index_get_parents callback has
 * already set a Python exception (see rustla_init below).
 */
rustlazyancestorsObject *
rustlazyancestors_init(indexObject *index,
                       /* to pass index_get_parents() */
                       int (*)(indexObject *, Py_ssize_t, int *, int),
                       /* intrevs vector */
                       Py_ssize_t initrevslen, long *initrevs, long stoprev,
                       int inclusive);
void rustlazyancestors_drop(rustlazyancestorsObject *self);
int rustlazyancestors_next(rustlazyancestorsObject *self);
int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev);
2438 2456
2439 2457 /* CPython instance methods */
2440 2458 static int rustla_init(rustlazyancestorsObject *self, PyObject *args)
2441 2459 {
2442 2460 PyObject *initrevsarg = NULL;
2443 2461 PyObject *inclusivearg = NULL;
2444 2462 long stoprev = 0;
2445 2463 long *initrevs = NULL;
2446 2464 int inclusive = 0;
2447 2465 Py_ssize_t i;
2448 2466
2449 2467 indexObject *index;
2450 2468 if (!PyArg_ParseTuple(args, "O!O!lO!", &indexType, &index, &PyList_Type,
2451 2469 &initrevsarg, &stoprev, &PyBool_Type,
2452 2470 &inclusivearg))
2453 2471 return -1;
2454 2472
2455 2473 Py_INCREF(index);
2456 2474 self->index = index;
2457 2475
2458 2476 if (inclusivearg == Py_True)
2459 2477 inclusive = 1;
2460 2478
2461 2479 Py_ssize_t linit = PyList_GET_SIZE(initrevsarg);
2462 2480
2463 2481 initrevs = (long *)calloc(linit, sizeof(long));
2464 2482
2465 2483 if (initrevs == NULL) {
2466 2484 PyErr_NoMemory();
2467 2485 goto bail;
2468 2486 }
2469 2487
2470 2488 for (i = 0; i < linit; i++) {
2471 2489 initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i));
2472 2490 }
2473 2491 if (PyErr_Occurred())
2474 2492 goto bail;
2475 2493
2476 2494 self->iter = rustlazyancestors_init(index, index_get_parents, linit,
2477 2495 initrevs, stoprev, inclusive);
2478 2496 if (self->iter == NULL) {
2479 2497 /* if this is because of GraphError::ParentOutOfRange
2480 2498 * index_get_parents() has already set the proper ValueError */
2481 2499 goto bail;
2482 2500 }
2483 2501
2484 2502 free(initrevs);
2485 2503 return 0;
2486 2504
2487 2505 bail:
2488 2506 free(initrevs);
2489 2507 return -1;
2490 2508 };
2491 2509
/*
 * tp_dealloc: drop the index reference and tear down the Rust
 * iterator.  NOTE(review): this assumes rustla_init() assigned
 * self->index and self->iter before any of its failure paths could
 * run — verify, since PyObject_New() does not zero the allocation.
 */
static void rustla_dealloc(rustlazyancestorsObject *self)
{
	Py_XDECREF(self->index);
	if (self->iter != NULL) { /* can happen if rustla_init failed */
		rustlazyancestors_drop(self->iter);
	}
	PyObject_Del(self);
}
2500 2518
/*
 * tp_iternext: advance the Rust iterator.  A -1 result means the
 * iteration is exhausted; returning NULL without setting an exception
 * is the standard tp_iternext way of signalling StopIteration.
 */
static PyObject *rustla_next(rustlazyancestorsObject *self)
{
	int res = rustlazyancestors_next(self->iter);
	if (res == -1) {
		/* Setting an explicit exception seems unnecessary
		 * as examples from Python source code (Objects/rangeobjets.c
		 * and Modules/_io/stringio.c) seem to demonstrate.
		 */
		return NULL;
	}
	return PyInt_FromLong(res);
}
2513 2531
/*
 * sq_contains: "rev in iterator".  Non-integer arguments are
 * deliberately treated as "not contained" rather than raising.
 */
static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev)
{
	long lrev;
	if (!pylong_to_long(rev, &lrev)) {
		/* Swallow the conversion error: a non-int can never be a
		 * member, so report false instead of propagating. */
		PyErr_Clear();
		return 0;
	}
	return rustlazyancestors_contains(self->iter, lrev);
}
2523 2541
/* Only the "in" operator is supported on the ancestor iterator. */
static PySequenceMethods rustla_sequence_methods = {
    0,                           /* sq_length */
    0,                           /* sq_concat */
    0,                           /* sq_repeat */
    0,                           /* sq_item */
    0,                           /* sq_slice */
    0,                           /* sq_ass_item */
    0,                           /* sq_ass_slice */
    (objobjproc)rustla_contains, /* sq_contains */
};
2534 2552
/* Type object for parsers.rustlazyancestors. */
static PyTypeObject rustlazyancestorsType = {
    PyVarObject_HEAD_INIT(NULL, 0)                  /* header */
    "parsers.rustlazyancestors",                    /* tp_name */
    sizeof(rustlazyancestorsObject),                /* tp_basicsize */
    0,                                              /* tp_itemsize */
    (destructor)rustla_dealloc,                     /* tp_dealloc */
    0,                                              /* tp_print */
    0,                                              /* tp_getattr */
    0,                                              /* tp_setattr */
    0,                                              /* tp_compare */
    0,                                              /* tp_repr */
    0,                                              /* tp_as_number */
    &rustla_sequence_methods,                       /* tp_as_sequence */
    0,                                              /* tp_as_mapping */
    0,                                              /* tp_hash */
    0,                                              /* tp_call */
    0,                                              /* tp_str */
    0,                                              /* tp_getattro */
    0,                                              /* tp_setattro */
    0,                                              /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,                             /* tp_flags */
    "Iterator over ancestors, implemented in Rust", /* tp_doc */
    0,                                              /* tp_traverse */
    0,                                              /* tp_clear */
    0,                                              /* tp_richcompare */
    0,                                              /* tp_weaklistoffset */
    0,                                              /* tp_iter */
    (iternextfunc)rustla_next,                      /* tp_iternext */
    0,                                              /* tp_methods */
    0,                                              /* tp_members */
    0,                                              /* tp_getset */
    0,                                              /* tp_base */
    0,                                              /* tp_dict */
    0,                                              /* tp_descr_get */
    0,                                              /* tp_descr_set */
    0,                                              /* tp_dictoffset */
    (initproc)rustla_init,                          /* tp_init */
    0,                                              /* tp_alloc */
};
2574 2592 #endif /* WITH_RUST */
2575 2593
/*
 * Register the revlog types (index, nodetree and, when built with
 * Rust support, rustlazyancestors) on the given module and build the
 * shared null-revision entry.  Failures in PyType_Ready() leave the
 * pending exception set and return early; the caller's module init
 * reports it.
 */
void revlog_module_init(PyObject *mod)
{
	indexType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&indexType) < 0)
		return;
	Py_INCREF(&indexType);
	PyModule_AddObject(mod, "index", (PyObject *)&indexType);

	nodetreeType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&nodetreeType) < 0)
		return;
	Py_INCREF(&nodetreeType);
	PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);

	/* The nullid entry tuple is built once and shared; it is
	 * immortal for the life of the process. */
	if (!nullentry) {
		nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0,
		                          0, -1, -1, -1, -1, nullid, 20);
	}
	/* Untrack from the GC: the tuple is permanent and acyclic. */
	if (nullentry)
		PyObject_GC_UnTrack(nullentry);

#ifdef WITH_RUST
	rustlazyancestorsType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&rustlazyancestorsType) < 0)
		return;
	Py_INCREF(&rustlazyancestorsType);
	PyModule_AddObject(mod, "rustlazyancestors",
	                   (PyObject *)&rustlazyancestorsType);
#endif
}
General Comments 0
You need to be logged in to leave comments. Login now