##// END OF EJS Templates
revlog: update the documentation for `trim_endidx`...
Boris Feld -
r40773:8edca70d default
parent child Browse files
Show More
@@ -1,2856 +1,2856 b''
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #include <Python.h>
11 11 #include <assert.h>
12 12 #include <ctype.h>
13 13 #include <limits.h>
14 14 #include <stddef.h>
15 15 #include <stdlib.h>
16 16 #include <string.h>
17 17
18 18 #include "bitmanipulation.h"
19 19 #include "charencode.h"
20 20 #include "util.h"
21 21
22 22 #ifdef IS_PY3K
23 23 /* The mapping of Python types is meant to be temporary to get Python
24 24 * 3 to compile. We should remove this once Python 3 support is fully
25 25 * supported and proper types are used in the extensions themselves. */
26 26 #define PyInt_Check PyLong_Check
27 27 #define PyInt_FromLong PyLong_FromLong
28 28 #define PyInt_FromSsize_t PyLong_FromSsize_t
29 29 #define PyInt_AsLong PyLong_AsLong
30 30 #endif
31 31
32 32 typedef struct indexObjectStruct indexObject;
33 33
34 34 typedef struct {
35 35 int children[16];
36 36 } nodetreenode;
37 37
38 38 /*
39 39 * A base-16 trie for fast node->rev mapping.
40 40 *
41 41 * Positive value is index of the next node in the trie
42 42 * Negative value is a leaf: -(rev + 2)
43 43 * Zero is empty
44 44 */
45 45 typedef struct {
46 46 indexObject *index;
47 47 nodetreenode *nodes;
48 48 unsigned length; /* # nodes in use */
49 49 unsigned capacity; /* # nodes allocated */
50 50 int depth; /* maximum depth of tree */
51 51 int splits; /* # splits performed */
52 52 } nodetree;
53 53
54 54 typedef struct {
55 55 PyObject_HEAD /* ; */
56 56 nodetree nt;
57 57 } nodetreeObject;
58 58
59 59 /*
60 60 * This class has two behaviors.
61 61 *
62 62 * When used in a list-like way (with integer keys), we decode an
63 63 * entry in a RevlogNG index file on demand. Our last entry is a
64 64 * sentinel, always a nullid. We have limited support for
65 65 * integer-keyed insert and delete, only at elements right before the
66 66 * sentinel.
67 67 *
68 68 * With string keys, we lazily perform a reverse mapping from node to
69 69 * rev, using a base-16 trie.
70 70 */
71 71 struct indexObjectStruct {
72 72 PyObject_HEAD
73 73 /* Type-specific fields go here. */
74 74 PyObject *data; /* raw bytes of index */
75 75 Py_buffer buf; /* buffer of data */
76 76 PyObject **cache; /* cached tuples */
77 77 const char **offsets; /* populated on demand */
78 78 Py_ssize_t raw_length; /* original number of elements */
79 79 Py_ssize_t length; /* current number of elements */
80 80 PyObject *added; /* populated on demand */
81 81 PyObject *headrevs; /* cache, invalidated on changes */
82 82 PyObject *filteredrevs; /* filtered revs set */
83 83 nodetree nt; /* base-16 trie */
84 84 int ntinitialized; /* 0 or 1 */
85 85 int ntrev; /* last rev scanned */
86 86 int ntlookups; /* # lookups */
87 87 int ntmisses; /* # lookups that miss the cache */
88 88 int inlined;
89 89 };
90 90
91 91 static Py_ssize_t index_length(const indexObject *self)
92 92 {
93 93 if (self->added == NULL)
94 94 return self->length;
95 95 return self->length + PyList_GET_SIZE(self->added);
96 96 }
97 97
98 98 static PyObject *nullentry = NULL;
99 99 static const char nullid[20] = {0};
100 100
101 101 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
102 102
103 103 #if LONG_MAX == 0x7fffffffL
104 104 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
105 105 #else
106 106 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
107 107 #endif
108 108
109 109 /* A RevlogNG v1 index entry is 64 bytes long. */
110 110 static const long v1_hdrsize = 64;
111 111
112 112 static void raise_revlog_error(void)
113 113 {
114 114 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
115 115
116 116 mod = PyImport_ImportModule("mercurial.error");
117 117 if (mod == NULL) {
118 118 goto cleanup;
119 119 }
120 120
121 121 dict = PyModule_GetDict(mod);
122 122 if (dict == NULL) {
123 123 goto cleanup;
124 124 }
125 125 Py_INCREF(dict);
126 126
127 127 errclass = PyDict_GetItemString(dict, "RevlogError");
128 128 if (errclass == NULL) {
129 129 PyErr_SetString(PyExc_SystemError,
130 130 "could not find RevlogError");
131 131 goto cleanup;
132 132 }
133 133
134 134 /* value of exception is ignored by callers */
135 135 PyErr_SetString(errclass, "RevlogError");
136 136
137 137 cleanup:
138 138 Py_XDECREF(dict);
139 139 Py_XDECREF(mod);
140 140 }
141 141
/*
 * Return a pointer to the beginning of a RevlogNG record.
 *
 * For inline revlogs the records are interleaved with revision data and
 * thus not evenly spaced, so a per-revision offset table is built
 * lazily by inline_scan() on first access.  Returns NULL (with
 * MemoryError set) if the table cannot be allocated.
 */
static const char *index_deref(indexObject *self, Py_ssize_t pos)
{
	if (self->inlined && pos > 0) {
		if (self->offsets == NULL) {
			/* lazily populate the offset table */
			self->offsets = PyMem_Malloc(self->raw_length *
			                             sizeof(*self->offsets));
			if (self->offsets == NULL)
				return (const char *)PyErr_NoMemory();
			inline_scan(self, self->offsets);
		}
		return self->offsets[pos];
	}

	/* non-inline (or rev 0): fixed 64-byte records, simple arithmetic */
	return (const char *)(self->buf.buf) + pos * v1_hdrsize;
}
160 160
/*
 * Store the two parent revisions of `rev` into ps[0] and ps[1].
 *
 * Revisions at or past self->length live in the in-memory overlay
 * (self->added); items 5 and 6 of the entry tuple are the parents.
 * On-disk entries keep the parents at byte offsets 24 and 28 of the
 * record.  Returns 0 on success, -1 with an exception set on
 * conversion failure or when a parent exceeds `maxrev`.
 */
static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
                                    int maxrev)
{
	if (rev >= self->length) {
		long tmp;
		PyObject *tuple =
		    PyList_GET_ITEM(self->added, rev - self->length);
		if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 5), &tmp)) {
			return -1;
		}
		ps[0] = (int)tmp;
		if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 6), &tmp)) {
			return -1;
		}
		ps[1] = (int)tmp;
	} else {
		const char *data = index_deref(self, rev);
		ps[0] = getbe32(data + 24);
		ps[1] = getbe32(data + 28);
	}
	/* If index file is corrupted, ps[] may point to invalid revisions. So
	 * there is a risk of buffer overflow to trust them unconditionally. */
	if (ps[0] > maxrev || ps[1] > maxrev) {
		PyErr_SetString(PyExc_ValueError, "parent out of range");
		return -1;
	}
	return 0;
}
189 189
/*
 * Return the byte offset at which the data for `rev` starts, or -1
 * with an exception set on error.
 *
 * Both on-disk records and in-memory tuples store the packed value
 * (offset << 16) | flags, hence the final >> 16.
 */
static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
{
	uint64_t offset;
	if (rev >= self->length) {
		/* in-memory entry: packed offset/flags is tuple item 0 */
		PyObject *tuple;
		PyObject *pylong;
		PY_LONG_LONG tmp;
		tuple = PyList_GET_ITEM(self->added, rev - self->length);
		pylong = PyTuple_GET_ITEM(tuple, 0);
		tmp = PyLong_AsLongLong(pylong);
		if (tmp == -1 && PyErr_Occurred()) {
			return -1;
		}
		if (tmp < 0) {
			PyErr_Format(PyExc_OverflowError,
			             "revlog entry size out of bound (%lld)",
			             (long long)tmp);
			return -1;
		}
		offset = (uint64_t)tmp;
	} else {
		const char *data = index_deref(self, rev);
		offset = getbe32(data + 4);
		if (rev == 0) {
			/* mask out version number for the first entry */
			offset &= 0xFFFF;
		} else {
			/* high 32 bits of the 48-bit offset live in bytes 0-3 */
			uint32_t offset_high = getbe32(data);
			offset |= ((uint64_t)offset_high) << 32;
		}
	}
	/* drop the 16 flag bits to obtain the actual byte offset */
	return (int64_t)(offset >> 16);
}
223 223
/*
 * Return the compressed (on-disk) length of the data for `rev`, or -1
 * with an exception set on error or when the stored value does not fit
 * in a non-negative int.
 */
static inline int index_get_length(indexObject *self, Py_ssize_t rev)
{
	if (rev >= self->length) {
		/* in-memory entry: compressed length is tuple item 1 */
		PyObject *tuple;
		PyObject *pylong;
		long ret;
		tuple = PyList_GET_ITEM(self->added, rev - self->length);
		pylong = PyTuple_GET_ITEM(tuple, 1);
		ret = PyInt_AsLong(pylong);
		if (ret == -1 && PyErr_Occurred()) {
			return -1;
		}
		if (ret < 0 || ret > (long)INT_MAX) {
			PyErr_Format(PyExc_OverflowError,
			             "revlog entry size out of bound (%ld)",
			             ret);
			return -1;
		}
		return (int)ret;
	} else {
		const char *data = index_deref(self, rev);
		/* a negative value here means the 32-bit field overflowed
		 * int, i.e. a corrupt or hostile index */
		int tmp = (int)getbe32(data + 8);
		if (tmp < 0) {
			PyErr_Format(PyExc_OverflowError,
			             "revlog entry size out of bound (%d)",
			             tmp);
			return -1;
		}
		return tmp;
	}
}
255 255
/*
 * RevlogNG format (all in big endian, data may be inlined):
 * 6 bytes: offset
 * 2 bytes: flags
 * 4 bytes: compressed length
 * 4 bytes: uncompressed length
 * 4 bytes: base revision
 * 4 bytes: link revision
 * 4 bytes: parent 1 revision
 * 4 bytes: parent 2 revision
 * 32 bytes: nodeid (only 20 bytes used)
 */
/*
 * Decode index entry `pos` into an 8-tuple (integer __getitem__).
 *
 * pos == -1 (nullrev) returns the shared null entry.  Tuples decoded
 * from the on-disk region are memoized in self->cache; entries in the
 * in-memory overlay (self->added) are returned directly.
 */
static PyObject *index_get(indexObject *self, Py_ssize_t pos)
{
	uint64_t offset_flags;
	int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
	const char *c_node_id;
	const char *data;
	Py_ssize_t length = index_length(self);
	PyObject *entry;

	if (pos == -1) {
		Py_INCREF(nullentry);
		return nullentry;
	}

	if (pos < 0 || pos >= length) {
		PyErr_SetString(PyExc_IndexError, "revlog index out of range");
		return NULL;
	}

	/* in-memory entries are already tuples; hand them back directly */
	if (pos >= self->length) {
		PyObject *obj;
		obj = PyList_GET_ITEM(self->added, pos - self->length);
		Py_INCREF(obj);
		return obj;
	}

	/* consult, or lazily create, the decoded-tuple cache */
	if (self->cache) {
		if (self->cache[pos]) {
			Py_INCREF(self->cache[pos]);
			return self->cache[pos];
		}
	} else {
		self->cache = calloc(self->raw_length, sizeof(PyObject *));
		if (self->cache == NULL)
			return PyErr_NoMemory();
	}

	data = index_deref(self, pos);
	if (data == NULL)
		return NULL;

	offset_flags = getbe32(data + 4);
	if (pos == 0) /* mask out version number for the first entry */
		offset_flags &= 0xFFFF;
	else {
		uint32_t offset_high = getbe32(data);
		offset_flags |= ((uint64_t)offset_high) << 32;
	}

	comp_len = getbe32(data + 8);
	uncomp_len = getbe32(data + 12);
	base_rev = getbe32(data + 16);
	link_rev = getbe32(data + 20);
	parent_1 = getbe32(data + 24);
	parent_2 = getbe32(data + 28);
	c_node_id = data + 32;

	entry = Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
	                      base_rev, link_rev, parent_1, parent_2, c_node_id,
	                      20);

	if (entry) {
		/* the tuple contains no GC-tracked members, so untracking it
		 * saves cycle-collector overhead; the extra INCREF is the
		 * reference owned by self->cache */
		PyObject_GC_UnTrack(entry);
		Py_INCREF(entry);
	}

	self->cache[pos] = entry;

	return entry;
}
338 338
/*
 * Return the 20-byte SHA of the node corresponding to the given rev.
 *
 * Returns nullid for pos == -1, and NULL for out-of-range positions or
 * access failures (in the latter case the failing call sets the
 * exception).
 */
static const char *index_node(indexObject *self, Py_ssize_t pos)
{
	Py_ssize_t length = index_length(self);
	const char *data;

	if (pos == -1)
		return nullid;

	if (pos >= length)
		return NULL;

	/* in-memory entry: the node is item 7 of the entry tuple */
	if (pos >= self->length) {
		PyObject *tuple, *str;
		tuple = PyList_GET_ITEM(self->added, pos - self->length);
		str = PyTuple_GetItem(tuple, 7);
		return str ? PyBytes_AS_STRING(str) : NULL;
	}

	/* on-disk entry: the node starts at byte 32 of the record */
	data = index_deref(self, pos);
	return data ? data + 32 : NULL;
}
363 363
364 364 /*
365 365 * Return the 20-byte SHA of the node corresponding to the given rev. The
366 366 * rev is assumed to be existing. If not, an exception is set.
367 367 */
368 368 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
369 369 {
370 370 const char *node = index_node(self, pos);
371 371 if (node == NULL) {
372 372 PyErr_Format(PyExc_IndexError, "could not access rev %d",
373 373 (int)pos);
374 374 }
375 375 return node;
376 376 }
377 377
378 378 static int nt_insert(nodetree *self, const char *node, int rev);
379 379
380 380 static int node_check(PyObject *obj, char **node)
381 381 {
382 382 Py_ssize_t nodelen;
383 383 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
384 384 return -1;
385 385 if (nodelen == 20)
386 386 return 0;
387 387 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
388 388 return -1;
389 389 }
390 390
/*
 * .append(entry) — add one 8-tuple index entry to the in-memory
 * overlay (self->added, created on first use).
 *
 * The tuple layout matches index_get(); item 7 must be a 20-byte node.
 * The node is also registered in the node trie when it is initialized,
 * and the cached head list is invalidated.
 */
static PyObject *index_append(indexObject *self, PyObject *obj)
{
	char *node;
	Py_ssize_t len;

	if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
		PyErr_SetString(PyExc_TypeError, "8-tuple required");
		return NULL;
	}

	if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
		return NULL;

	/* the new entry's revision number is the current length */
	len = index_length(self);

	if (self->added == NULL) {
		self->added = PyList_New(0);
		if (self->added == NULL)
			return NULL;
	}

	if (PyList_Append(self->added, obj) == -1)
		return NULL;

	if (self->ntinitialized)
		nt_insert(&self->nt, node, (int)len);

	/* the set of heads may have changed */
	Py_CLEAR(self->headrevs);
	Py_RETURN_NONE;
}
421 421
/*
 * .stats() — return a dict (bytes keys -> ints) of diagnostic counters
 * describing the index and its node trie.
 */
static PyObject *index_stats(indexObject *self)
{
	PyObject *obj = PyDict_New();
	PyObject *s = NULL;
	PyObject *t = NULL;

	if (obj == NULL)
		return NULL;

/* store struct field __n in obj under key string __d; jump to `bail`
 * on any allocation or insertion failure */
#define istat(__n, __d)                                                        \
	do {                                                                   \
		s = PyBytes_FromString(__d);                                   \
		t = PyInt_FromSsize_t(self->__n);                              \
		if (!s || !t)                                                  \
			goto bail;                                             \
		if (PyDict_SetItem(obj, s, t) == -1)                           \
			goto bail;                                             \
		Py_CLEAR(s);                                                   \
		Py_CLEAR(t);                                                   \
	} while (0)

	if (self->added) {
		Py_ssize_t len = PyList_GET_SIZE(self->added);
		s = PyBytes_FromString("index entries added");
		t = PyInt_FromSsize_t(len);
		if (!s || !t)
			goto bail;
		if (PyDict_SetItem(obj, s, t) == -1)
			goto bail;
		Py_CLEAR(s);
		Py_CLEAR(t);
	}

	/* only report raw_length separately when it differs (inline case) */
	if (self->raw_length != self->length)
		istat(raw_length, "revs on disk");
	istat(length, "revs in memory");
	istat(ntlookups, "node trie lookups");
	istat(ntmisses, "node trie misses");
	istat(ntrev, "node trie last rev scanned");
	if (self->ntinitialized) {
		istat(nt.capacity, "node trie capacity");
		istat(nt.depth, "node trie depth");
		istat(nt.length, "node trie count");
		istat(nt.splits, "node trie splits");
	}

#undef istat

	return obj;

bail:
	Py_XDECREF(obj);
	Py_XDECREF(s);
	Py_XDECREF(t);
	return NULL;
}
478 478
479 479 /*
480 480 * When we cache a list, we want to be sure the caller can't mutate
481 481 * the cached copy.
482 482 */
483 483 static PyObject *list_copy(PyObject *list)
484 484 {
485 485 Py_ssize_t len = PyList_GET_SIZE(list);
486 486 PyObject *newlist = PyList_New(len);
487 487 Py_ssize_t i;
488 488
489 489 if (newlist == NULL)
490 490 return NULL;
491 491
492 492 for (i = 0; i < len; i++) {
493 493 PyObject *obj = PyList_GET_ITEM(list, i);
494 494 Py_INCREF(obj);
495 495 PyList_SET_ITEM(newlist, i, obj);
496 496 }
497 497
498 498 return newlist;
499 499 }
500 500
501 501 static int check_filter(PyObject *filter, Py_ssize_t arg)
502 502 {
503 503 if (filter) {
504 504 PyObject *arglist, *result;
505 505 int isfiltered;
506 506
507 507 arglist = Py_BuildValue("(n)", arg);
508 508 if (!arglist) {
509 509 return -1;
510 510 }
511 511
512 512 result = PyEval_CallObject(filter, arglist);
513 513 Py_DECREF(arglist);
514 514 if (!result) {
515 515 return -1;
516 516 }
517 517
518 518 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
519 519 * same as this function, so we can just return it directly.*/
520 520 isfiltered = PyObject_IsTrue(result);
521 521 Py_DECREF(result);
522 522 return isfiltered;
523 523 } else {
524 524 return 0;
525 525 }
526 526 }
527 527
/*
 * Mark every revision listed in `list` with `marker` in the phases
 * array and return the smallest such revision (or len(index) + 2 when
 * the list is empty).  Returns -2 on iteration/conversion error.
 *
 * NOTE(review): iter_item_long indexes `phases` without a bounds
 * check — presumably the caller guarantees roots are valid revisions;
 * verify against compute_phases_map_sets.
 */
static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
                                    Py_ssize_t marker, char *phases)
{
	PyObject *iter = NULL;
	PyObject *iter_item = NULL;
	Py_ssize_t min_idx = index_length(self) + 2;
	long iter_item_long;

	if (PyList_GET_SIZE(list) != 0) {
		iter = PyObject_GetIter(list);
		if (iter == NULL)
			return -2;
		while ((iter_item = PyIter_Next(iter))) {
			if (!pylong_to_long(iter_item, &iter_item_long)) {
				Py_DECREF(iter_item);
				return -2;
			}
			Py_DECREF(iter_item);
			if (iter_item_long < min_idx)
				min_idx = iter_item_long;
			phases[iter_item_long] = (char)marker;
		}
		Py_DECREF(iter);
	}

	return min_idx;
}
555 555
556 556 static inline void set_phase_from_parents(char *phases, int parent_1,
557 557 int parent_2, Py_ssize_t i)
558 558 {
559 559 if (parent_1 >= 0 && phases[parent_1] > phases[i])
560 560 phases[i] = phases[parent_1];
561 561 if (parent_2 >= 0 && phases[parent_2] > phases[i])
562 562 phases[i] = phases[parent_2];
563 563 }
564 564
/*
 * reachableroots2(minroot, heads, roots, includepath) — return the
 * subset of `roots` reachable from `heads` by walking parent links,
 * never descending below revision `minroot`.  When includepath is
 * True, every revision lying on a path between a reachable root and a
 * head is included in the result as well.
 */
static PyObject *reachableroots2(indexObject *self, PyObject *args)
{

	/* Input */
	long minroot;
	PyObject *includepatharg = NULL;
	int includepath = 0;
	/* heads and roots are lists */
	PyObject *heads = NULL;
	PyObject *roots = NULL;
	PyObject *reachable = NULL;

	PyObject *val;
	Py_ssize_t len = index_length(self);
	long revnum;
	Py_ssize_t k;
	Py_ssize_t i;
	Py_ssize_t l;
	int r;
	int parents[2];

	/* Internal data structure:
	 * tovisit: array of length len+1 (all revs + nullrev), filled upto
	 * lentovisit
	 *
	 * revstates: array of length len+1 (all revs + nullrev);
	 * index is rev+1 so that nullrev (-1) maps to slot 0 */
	int *tovisit = NULL;
	long lentovisit = 0;
	enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
	char *revstates = NULL;

	/* Get arguments */
	if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
	                      &PyList_Type, &roots, &PyBool_Type,
	                      &includepatharg))
		goto bail;

	if (includepatharg == Py_True)
		includepath = 1;

	/* Initialize return set */
	reachable = PyList_New(0);
	if (reachable == NULL)
		goto bail;

	/* Initialize internal datastructures */
	tovisit = (int *)malloc((len + 1) * sizeof(int));
	if (tovisit == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	revstates = (char *)calloc(len + 1, 1);
	if (revstates == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* Mark all the roots in revstates */
	l = PyList_GET_SIZE(roots);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		/* If root is out of range, e.g. wdir(), it must be unreachable
		 * from heads. So we can just ignore it. */
		if (revnum + 1 < 0 || revnum + 1 >= len + 1)
			continue;
		revstates[revnum + 1] |= RS_ROOT;
	}

	/* Populate tovisit with all the heads */
	l = PyList_GET_SIZE(heads);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
			PyErr_SetString(PyExc_IndexError, "head out of range");
			goto bail;
		}
		if (!(revstates[revnum + 1] & RS_SEEN)) {
			tovisit[lentovisit++] = (int)revnum;
			revstates[revnum + 1] |= RS_SEEN;
		}
	}

	/* Visit the tovisit list and find the reachable roots */
	k = 0;
	while (k < lentovisit) {
		/* Add the node to reachable if it is a root*/
		revnum = tovisit[k++];
		if (revstates[revnum + 1] & RS_ROOT) {
			revstates[revnum + 1] |= RS_REACHABLE;
			val = PyInt_FromLong(revnum);
			if (val == NULL)
				goto bail;
			r = PyList_Append(reachable, val);
			Py_DECREF(val);
			if (r < 0)
				goto bail;
			/* without includepath the walk can stop at a root */
			if (includepath == 0)
				continue;
		}

		/* Add its parents to the list of nodes to visit */
		if (revnum == -1)
			continue;
		r = index_get_parents(self, revnum, parents, (int)len - 1);
		if (r < 0)
			goto bail;
		for (i = 0; i < 2; i++) {
			if (!(revstates[parents[i] + 1] & RS_SEEN) &&
			    parents[i] >= minroot) {
				tovisit[lentovisit++] = parents[i];
				revstates[parents[i] + 1] |= RS_SEEN;
			}
		}
	}

	/* Find all the nodes in between the roots we found and the heads
	 * and add them to the reachable set */
	if (includepath == 1) {
		long minidx = minroot;
		if (minidx < 0)
			minidx = 0;
		/* forward sweep: a rev is reachable if a parent is */
		for (i = minidx; i < len; i++) {
			if (!(revstates[i + 1] & RS_SEEN))
				continue;
			r = index_get_parents(self, i, parents, (int)len - 1);
			/* Corrupted index file, error is set from
			 * index_get_parents */
			if (r < 0)
				goto bail;
			if (((revstates[parents[0] + 1] |
			      revstates[parents[1] + 1]) &
			     RS_REACHABLE) &&
			    !(revstates[i + 1] & RS_REACHABLE)) {
				revstates[i + 1] |= RS_REACHABLE;
				val = PyInt_FromSsize_t(i);
				if (val == NULL)
					goto bail;
				r = PyList_Append(reachable, val);
				Py_DECREF(val);
				if (r < 0)
					goto bail;
			}
		}
	}

	free(revstates);
	free(tovisit);
	return reachable;
bail:
	Py_XDECREF(reachable);
	free(revstates);
	free(tovisit);
	return NULL;
}
723 723
/*
 * computephasesmapsets(roots) — compute the phase of every revision.
 *
 * `roots` is a list of per-phase root lists: roots[i] holds the root
 * revisions of phase i+1.  Returns a 2-tuple (len(index),
 * phasessetlist) where phasessetlist[phase] is the set of revisions in
 * that non-public phase; index 0 is None because public revisions are
 * derived by difference.  Returns NULL with an exception set on error.
 */
static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
{
	PyObject *roots = Py_None;
	PyObject *ret = NULL;
	PyObject *phasessize = NULL;
	PyObject *phaseroots = NULL;
	PyObject *phaseset = NULL;
	PyObject *phasessetlist = NULL;
	PyObject *rev = NULL;
	Py_ssize_t len = index_length(self);
	Py_ssize_t numphase = 0;
	Py_ssize_t minrevallphases = 0;
	Py_ssize_t minrevphase = 0;
	Py_ssize_t i = 0;
	char *phases = NULL;
	long phase;

	if (!PyArg_ParseTuple(args, "O", &roots))
		goto done;
	if (roots == NULL || !PyList_Check(roots)) {
		PyErr_SetString(PyExc_TypeError, "roots must be a list");
		goto done;
	}

	phases = calloc(
	    len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
	if (phases == NULL) {
		PyErr_NoMemory();
		goto done;
	}
	/* Put the phase information of all the roots in phases */
	numphase = PyList_GET_SIZE(roots) + 1;
	minrevallphases = len + 1;
	phasessetlist = PyList_New(numphase);
	if (phasessetlist == NULL)
		goto done;

	/* slot 0 is the public phase: no set, computed by difference */
	PyList_SET_ITEM(phasessetlist, 0, Py_None);
	Py_INCREF(Py_None);

	for (i = 0; i < numphase - 1; i++) {
		phaseroots = PyList_GET_ITEM(roots, i);
		phaseset = PySet_New(NULL);
		if (phaseset == NULL)
			goto release;
		/* the list now owns phaseset; we keep a borrowed pointer */
		PyList_SET_ITEM(phasessetlist, i + 1, phaseset);
		if (!PyList_Check(phaseroots)) {
			PyErr_SetString(PyExc_TypeError,
			                "roots item must be a list");
			goto release;
		}
		minrevphase =
		    add_roots_get_min(self, phaseroots, i + 1, phases);
		if (minrevphase == -2) /* Error from add_roots_get_min */
			goto release;
		minrevallphases = MIN(minrevallphases, minrevphase);
	}
	/* Propagate the phase information from the roots to the revs */
	if (minrevallphases != -1) {
		int parents[2];
		for (i = minrevallphases; i < len; i++) {
			if (index_get_parents(self, i, parents, (int)len - 1) <
			    0)
				goto release;
			set_phase_from_parents(phases, parents[0], parents[1],
			                       i);
		}
	}
	/* Transform phase list to a python list */
	phasessize = PyInt_FromSsize_t(len);
	if (phasessize == NULL)
		goto release;
	for (i = 0; i < len; i++) {
		phase = phases[i];
		/* We only store the sets of phase for non public phase, the
		 * public phase is computed as a difference */
		if (phase != 0) {
			phaseset = PyList_GET_ITEM(phasessetlist, phase);
			rev = PyInt_FromSsize_t(i);
			if (rev == NULL)
				goto release;
			PySet_Add(phaseset, rev);
			Py_XDECREF(rev);
		}
	}
	ret = PyTuple_Pack(2, phasessize, phasessetlist);

release:
	Py_XDECREF(phasessize);
	Py_XDECREF(phasessetlist);
done:
	free(phases);
	return ret;
}
818 818
/*
 * headrevs([filteredrevs]) — return the list of head revisions,
 * excluding any revision in `filteredrevs` (an object implementing
 * __contains__).
 *
 * The result is cached on self->headrevs until the index changes or a
 * different filter object is supplied; a copy of the cached list is
 * returned so callers cannot mutate the cache.
 */
static PyObject *index_headrevs(indexObject *self, PyObject *args)
{
	Py_ssize_t i, j, len;
	char *nothead = NULL;
	PyObject *heads = NULL;
	PyObject *filter = NULL;
	PyObject *filteredrevs = Py_None;

	if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
		return NULL;
	}

	/* cache hit: same filter object as the cached computation */
	if (self->headrevs && filteredrevs == self->filteredrevs)
		return list_copy(self->headrevs);

	Py_DECREF(self->filteredrevs);
	self->filteredrevs = filteredrevs;
	Py_INCREF(filteredrevs);

	if (filteredrevs != Py_None) {
		filter = PyObject_GetAttrString(filteredrevs, "__contains__");
		if (!filter) {
			PyErr_SetString(
			    PyExc_TypeError,
			    "filteredrevs has no attribute __contains__");
			goto bail;
		}
	}

	len = index_length(self);
	heads = PyList_New(0);
	if (heads == NULL)
		goto bail;
	if (len == 0) {
		/* empty repository: nullrev is the only head */
		PyObject *nullid = PyInt_FromLong(-1);
		if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
			Py_XDECREF(nullid);
			goto bail;
		}
		goto done;
	}

	/* nothead[i] == 1 means rev i is filtered or has an unfiltered
	 * child, i.e. it is not a head */
	nothead = calloc(len, 1);
	if (nothead == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	for (i = len - 1; i >= 0; i--) {
		int isfiltered;
		int parents[2];

		/* If nothead[i] == 1, it means we've seen an unfiltered child
		 * of this node already, and therefore this node is not
		 * filtered. So we can skip the expensive check_filter step.
		 */
		if (nothead[i] != 1) {
			isfiltered = check_filter(filter, i);
			if (isfiltered == -1) {
				PyErr_SetString(PyExc_TypeError,
				                "unable to check filter");
				goto bail;
			}

			if (isfiltered) {
				nothead[i] = 1;
				continue;
			}
		}

		if (index_get_parents(self, i, parents, (int)len - 1) < 0)
			goto bail;
		for (j = 0; j < 2; j++) {
			if (parents[j] >= 0)
				nothead[parents[j]] = 1;
		}
	}

	/* everything left unmarked is a head */
	for (i = 0; i < len; i++) {
		PyObject *head;

		if (nothead[i])
			continue;
		head = PyInt_FromSsize_t(i);
		if (head == NULL || PyList_Append(heads, head) == -1) {
			Py_XDECREF(head);
			goto bail;
		}
	}

done:
	self->headrevs = heads;
	Py_XDECREF(filter);
	free(nothead);
	return list_copy(self->headrevs);
bail:
	Py_XDECREF(filter);
	Py_XDECREF(heads);
	free(nothead);
	return NULL;
}
920 920
921 921 /**
922 922 * Obtain the base revision index entry.
923 923 *
924 924 * Callers must ensure that rev >= 0 or illegal memory access may occur.
925 925 */
926 926 static inline int index_baserev(indexObject *self, int rev)
927 927 {
928 928 const char *data;
929 929
930 930 if (rev >= self->length) {
931 931 PyObject *tuple =
932 932 PyList_GET_ITEM(self->added, rev - self->length);
933 933 long ret;
934 934 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 3), &ret)) {
935 935 return -2;
936 936 }
937 937 return (int)ret;
938 938 } else {
939 939 data = index_deref(self, rev);
940 940 if (data == NULL) {
941 941 return -2;
942 942 }
943 943
944 944 return getbe32(data + 16);
945 945 }
946 946 }
947 947
948 948 static PyObject *index_deltachain(indexObject *self, PyObject *args)
949 949 {
950 950 int rev, generaldelta;
951 951 PyObject *stoparg;
952 952 int stoprev, iterrev, baserev = -1;
953 953 int stopped;
954 954 PyObject *chain = NULL, *result = NULL;
955 955 const Py_ssize_t length = index_length(self);
956 956
957 957 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
958 958 return NULL;
959 959 }
960 960
961 961 if (PyInt_Check(stoparg)) {
962 962 stoprev = (int)PyInt_AsLong(stoparg);
963 963 if (stoprev == -1 && PyErr_Occurred()) {
964 964 return NULL;
965 965 }
966 966 } else if (stoparg == Py_None) {
967 967 stoprev = -2;
968 968 } else {
969 969 PyErr_SetString(PyExc_ValueError,
970 970 "stoprev must be integer or None");
971 971 return NULL;
972 972 }
973 973
974 974 if (rev < 0 || rev >= length) {
975 975 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
976 976 return NULL;
977 977 }
978 978
979 979 chain = PyList_New(0);
980 980 if (chain == NULL) {
981 981 return NULL;
982 982 }
983 983
984 984 baserev = index_baserev(self, rev);
985 985
986 986 /* This should never happen. */
987 987 if (baserev <= -2) {
988 988 /* Error should be set by index_deref() */
989 989 assert(PyErr_Occurred());
990 990 goto bail;
991 991 }
992 992
993 993 iterrev = rev;
994 994
995 995 while (iterrev != baserev && iterrev != stoprev) {
996 996 PyObject *value = PyInt_FromLong(iterrev);
997 997 if (value == NULL) {
998 998 goto bail;
999 999 }
1000 1000 if (PyList_Append(chain, value)) {
1001 1001 Py_DECREF(value);
1002 1002 goto bail;
1003 1003 }
1004 1004 Py_DECREF(value);
1005 1005
1006 1006 if (generaldelta) {
1007 1007 iterrev = baserev;
1008 1008 } else {
1009 1009 iterrev--;
1010 1010 }
1011 1011
1012 1012 if (iterrev < 0) {
1013 1013 break;
1014 1014 }
1015 1015
1016 1016 if (iterrev >= length) {
1017 1017 PyErr_SetString(PyExc_IndexError,
1018 1018 "revision outside index");
1019 1019 return NULL;
1020 1020 }
1021 1021
1022 1022 baserev = index_baserev(self, iterrev);
1023 1023
1024 1024 /* This should never happen. */
1025 1025 if (baserev <= -2) {
1026 1026 /* Error should be set by index_deref() */
1027 1027 assert(PyErr_Occurred());
1028 1028 goto bail;
1029 1029 }
1030 1030 }
1031 1031
1032 1032 if (iterrev == stoprev) {
1033 1033 stopped = 1;
1034 1034 } else {
1035 1035 PyObject *value = PyInt_FromLong(iterrev);
1036 1036 if (value == NULL) {
1037 1037 goto bail;
1038 1038 }
1039 1039 if (PyList_Append(chain, value)) {
1040 1040 Py_DECREF(value);
1041 1041 goto bail;
1042 1042 }
1043 1043 Py_DECREF(value);
1044 1044
1045 1045 stopped = 0;
1046 1046 }
1047 1047
1048 1048 if (PyList_Reverse(chain)) {
1049 1049 goto bail;
1050 1050 }
1051 1051
1052 1052 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
1053 1053 Py_DECREF(chain);
1054 1054 return result;
1055 1055
1056 1056 bail:
1057 1057 Py_DECREF(chain);
1058 1058 return NULL;
1059 1059 }
1060 1060
1061 1061 static inline int64_t
1062 1062 index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
1063 1063 {
1064 1064 int64_t start_offset;
1065 1065 int64_t end_offset;
1066 1066 int end_size;
1067 1067 start_offset = index_get_start(self, start_rev);
1068 1068 if (start_offset < 0) {
1069 1069 return -1;
1070 1070 }
1071 1071 end_offset = index_get_start(self, end_rev);
1072 1072 if (end_offset < 0) {
1073 1073 return -1;
1074 1074 }
1075 1075 end_size = index_get_length(self, end_rev);
1076 1076 if (end_size < 0) {
1077 1077 return -1;
1078 1078 }
1079 1079 if (end_offset < start_offset) {
1080 1080 PyErr_Format(PyExc_ValueError,
1081 1081 "corrupted revlog index: inconsistent offset "
1082 1082 "between revisions (%zd) and (%zd)",
1083 1083 start_rev, end_rev);
1084 1084 return -1;
1085 1085 }
1086 1086 return (end_offset - start_offset) + (int64_t)end_size;
1087 1087 }
1088 1088
1089 /* returns revs[startidx:endidx] without empty trailing revs */
1089 /* returns endidx so that revs[startidx:endidx] has no empty trailing revs */
1090 1090 static Py_ssize_t trim_endidx(indexObject *self, const Py_ssize_t *revs,
1091 1091 Py_ssize_t startidx, Py_ssize_t endidx)
1092 1092 {
1093 1093 int length;
1094 1094 while (endidx > 1 && endidx > startidx) {
1095 1095 length = index_get_length(self, revs[endidx - 1]);
1096 1096 if (length < 0) {
1097 1097 return -1;
1098 1098 }
1099 1099 if (length != 0) {
1100 1100 break;
1101 1101 }
1102 1102 endidx -= 1;
1103 1103 }
1104 1104 return endidx;
1105 1105 }
1106 1106
1107 1107 struct Gap {
1108 1108 int64_t size;
1109 1109 Py_ssize_t idx;
1110 1110 };
1111 1111
1112 1112 static int gap_compare(const void *left, const void *right)
1113 1113 {
1114 1114 const struct Gap *l_left = ((const struct Gap *)left);
1115 1115 const struct Gap *l_right = ((const struct Gap *)right);
1116 1116 if (l_left->size < l_right->size) {
1117 1117 return -1;
1118 1118 } else if (l_left->size > l_right->size) {
1119 1119 return 1;
1120 1120 }
1121 1121 return 0;
1122 1122 }
1123 1123 static int Py_ssize_t_compare(const void *left, const void *right)
1124 1124 {
1125 1125 const Py_ssize_t l_left = *(const Py_ssize_t *)left;
1126 1126 const Py_ssize_t l_right = *(const Py_ssize_t *)right;
1127 1127 if (l_left < l_right) {
1128 1128 return -1;
1129 1129 } else if (l_left > l_right) {
1130 1130 return 1;
1131 1131 }
1132 1132 return 0;
1133 1133 }
1134 1134
/*
 * slicechunktodensity(revs, targetdensity, mingapsize) -> tuple/list of lists
 *
 * Slice the delta chain `revs` (a Python list of revision numbers) into
 * contiguous chunks so that, for each chunk, the density (payload bytes
 * over bytes read from disk) stays at or above `targetdensity`.  Gaps
 * of `mingapsize` bytes or less are never used as split points.
 *
 * Memory discipline: all native arrays are freed through the shared
 * `done:` label; `bail:` additionally drops the partially built Python
 * objects before falling through to `done:`.
 */
static PyObject *index_slicechunktodensity(indexObject *self, PyObject *args)
{
	/* method arguments */
	PyObject *list_revs = NULL; /* revisions in the chain */
	double targetdensity = 0;   /* min density to achieve */
	Py_ssize_t mingapsize = 0;  /* threshold to ignore gaps */

	/* other core variables */
	Py_ssize_t idxlen = index_length(self);
	Py_ssize_t i;            /* used for various iteration */
	PyObject *result = NULL; /* the final return of the function */

	/* generic information about the delta chain being slice */
	Py_ssize_t num_revs = 0;    /* size of the full delta chain */
	Py_ssize_t *revs = NULL;    /* native array of revision in the chain */
	int64_t chainpayload = 0;   /* sum of all delta in the chain */
	int64_t deltachainspan = 0; /* distance from first byte to last byte */

	/* variable used for slicing the delta chain */
	int64_t readdata = 0; /* amount of data currently planned to be read */
	double density = 0;   /* ration of payload data compared to read ones */
	int64_t previous_end;
	struct Gap *gaps = NULL; /* array of notable gap in the chain */
	Py_ssize_t num_gaps =
	    0; /* total number of notable gap recorded so far */
	Py_ssize_t *selected_indices = NULL; /* indices of gap skipped over */
	Py_ssize_t num_selected = 0;         /* number of gaps skipped */
	PyObject *chunk = NULL;              /* individual slice */
	PyObject *allchunks = NULL;          /* all slices */
	Py_ssize_t previdx;

	/* parsing argument */
	if (!PyArg_ParseTuple(args, "O!dn", &PyList_Type, &list_revs,
	                      &targetdensity, &mingapsize)) {
		goto bail;
	}

	/* If the delta chain contains a single element, we do not need slicing
	 */
	num_revs = PyList_GET_SIZE(list_revs);
	if (num_revs <= 1) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* Turn the python list into a native integer array (for efficiency) */
	revs = (Py_ssize_t *)calloc(num_revs, sizeof(Py_ssize_t));
	if (revs == NULL) {
		PyErr_NoMemory();
		goto bail;
	}
	for (i = 0; i < num_revs; i++) {
		Py_ssize_t revnum = PyInt_AsLong(PyList_GET_ITEM(list_revs, i));
		if (revnum == -1 && PyErr_Occurred()) {
			goto bail;
		}
		if (revnum < 0 || revnum >= idxlen) {
			PyErr_SetString(PyExc_IndexError, "index out of range");
			goto bail;
		}
		revs[i] = revnum;
	}

	/* Compute and check various property of the unsliced delta chain */
	deltachainspan = index_segment_span(self, revs[0], revs[num_revs - 1]);
	if (deltachainspan < 0) {
		goto bail;
	}

	/* a chain smaller than the gap threshold is never worth slicing */
	if (deltachainspan <= mingapsize) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	chainpayload = 0;
	for (i = 0; i < num_revs; i++) {
		int tmp = index_get_length(self, revs[i]);
		if (tmp < 0) {
			goto bail;
		}
		chainpayload += tmp;
	}

	readdata = deltachainspan;
	density = 1.0;

	if (0 < deltachainspan) {
		density = (double)chainpayload / (double)deltachainspan;
	}

	/* dense enough already: return the chain as a single slice */
	if (density >= targetdensity) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* if chain is too sparse, look for relevant gaps */
	gaps = (struct Gap *)calloc(num_revs, sizeof(struct Gap));
	if (gaps == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	previous_end = -1;
	for (i = 0; i < num_revs; i++) {
		int64_t revstart;
		int revsize;
		revstart = index_get_start(self, revs[i]);
		if (revstart < 0) {
			goto bail;
		};
		revsize = index_get_length(self, revs[i]);
		if (revsize < 0) {
			goto bail;
		};
		/* empty revisions do not count as data nor as gap edges */
		if (revsize == 0) {
			continue;
		}
		if (previous_end >= 0) {
			int64_t gapsize = revstart - previous_end;
			if (gapsize > mingapsize) {
				gaps[num_gaps].size = gapsize;
				gaps[num_gaps].idx = i;
				num_gaps += 1;
			}
		}
		previous_end = revstart + revsize;
	}
	if (num_gaps == 0) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	qsort(gaps, num_gaps, sizeof(struct Gap), &gap_compare);

	/* Slice the largest gap first, they improve the density the most */
	selected_indices =
	    (Py_ssize_t *)malloc((num_gaps + 1) * sizeof(Py_ssize_t));
	if (selected_indices == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	for (i = num_gaps - 1; i >= 0; i--) {
		selected_indices[num_selected] = gaps[i].idx;
		readdata -= gaps[i].size;
		num_selected += 1;
		if (readdata <= 0) {
			density = 1.0;
		} else {
			density = (double)chainpayload / (double)readdata;
		}
		if (density >= targetdensity) {
			break;
		}
	}
	/* re-sort the chosen split points back into chain order */
	qsort(selected_indices, num_selected, sizeof(Py_ssize_t),
	      &Py_ssize_t_compare);

	/* create the resulting slice */
	allchunks = PyList_New(0);
	if (allchunks == NULL) {
		goto bail;
	}
	previdx = 0;
	/* sentinel: the final chunk runs to the end of the chain */
	selected_indices[num_selected] = num_revs;
	for (i = 0; i <= num_selected; i++) {
		Py_ssize_t idx = selected_indices[i];
		Py_ssize_t endidx = trim_endidx(self, revs, previdx, idx);
		if (endidx < 0) {
			goto bail;
		}
		if (previdx < endidx) {
			chunk = PyList_GetSlice(list_revs, previdx, endidx);
			if (chunk == NULL) {
				goto bail;
			}
			if (PyList_Append(allchunks, chunk) == -1) {
				goto bail;
			}
			Py_DECREF(chunk);
			chunk = NULL;
		}
		previdx = idx;
	}
	result = allchunks;
	goto done;

bail:
	Py_XDECREF(allchunks);
	Py_XDECREF(chunk);
done:
	free(revs);
	free(gaps);
	free(selected_indices);
	return result;
}
1329 1329
1330 1330 static inline int nt_level(const char *node, Py_ssize_t level)
1331 1331 {
1332 1332 int v = node[level >> 1];
1333 1333 if (!(level & 1))
1334 1334 v >>= 4;
1335 1335 return v & 0xf;
1336 1336 }
1337 1337
/*
 * Look up a binary node (or hex prefix, when `hex` is true) in the trie.
 *
 * Return values:
 *
 * -4: match is ambiguous (multiple candidates)
 * -2: not found
 * -1: the null id
 * rest: valid rev
 */
static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
                   int hex)
{
	int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
	int level, maxlevel, off;

	/* the null id is not stored in the trie; map it to rev -1 */
	if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
		return -1;

	/* one trie level per hex digit: binary input has 2 levels/byte */
	if (hex)
		maxlevel = nodelen > 40 ? 40 : (int)nodelen;
	else
		maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);

	for (level = off = 0; level < maxlevel; level++) {
		int k = getnybble(node, level);
		nodetreenode *n = &self->nodes[off];
		int v = n->children[k];

		if (v < 0) {
			/* leaf: verify the remaining digits of the query
			 * against the full stored node */
			const char *n;
			Py_ssize_t i;

			v = -(v + 2);
			n = index_node(self->index, v);
			if (n == NULL)
				return -2;
			for (i = level; i < maxlevel; i++)
				if (getnybble(node, i) != nt_level(n, i))
					return -2;
			return v;
		}
		if (v == 0)
			return -2;
		/* interior node: descend one level */
		off = v;
	}
	/* multiple matches against an ambiguous prefix */
	return -4;
}
1384 1384
/*
 * Grab an unused node slot in the trie, doubling the backing array when
 * it is full.  Returns the new node's index, or -1 on error with a
 * MemoryError set.
 */
static int nt_new(nodetree *self)
{
	if (self->length == self->capacity) {
		unsigned newcapacity;
		nodetreenode *newnodes;
		newcapacity = self->capacity * 2;
		if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
			PyErr_SetString(PyExc_MemoryError,
			                "overflow in nt_new");
			return -1;
		}
		newnodes =
		    realloc(self->nodes, newcapacity * sizeof(nodetreenode));
		if (newnodes == NULL) {
			PyErr_SetString(PyExc_MemoryError, "out of memory");
			return -1;
		}
		self->capacity = newcapacity;
		self->nodes = newnodes;
		/* realloc does not zero: clear the freshly added tail so
		 * new nodes start with all-empty children */
		memset(&self->nodes[self->length], 0,
		       sizeof(nodetreenode) * (self->capacity - self->length));
	}
	return self->length++;
}
1409 1409
/*
 * Insert a 20-byte binary node into the trie, mapping it to `rev`.
 * Leaves are encoded as -(rev + 2) so that 0 can mean "empty".
 * Returns 0 on success, -1 on error (exception set).
 */
static int nt_insert(nodetree *self, const char *node, int rev)
{
	int level = 0;
	int off = 0;

	while (level < 40) {
		int k = nt_level(node, level);
		nodetreenode *n;
		int v;

		n = &self->nodes[off];
		v = n->children[k];

		if (v == 0) {
			/* empty slot: store rev directly as a leaf */
			n->children[k] = -rev - 2;
			return 0;
		}
		if (v < 0) {
			/* slot already holds a leaf: split it */
			const char *oldnode =
			    index_node_existing(self->index, -(v + 2));
			int noff;

			if (oldnode == NULL)
				return -1;
			if (!memcmp(oldnode, node, 20)) {
				/* same node: overwrite the stored rev */
				n->children[k] = -rev - 2;
				return 0;
			}
			noff = nt_new(self);
			if (noff == -1)
				return -1;
			/* self->nodes may have been changed by realloc */
			self->nodes[off].children[k] = noff;
			off = noff;
			n = &self->nodes[off];
			/* push the old leaf one level down; the loop then
			 * retries inserting `node` at that level */
			n->children[nt_level(oldnode, ++level)] = v;
			if (level > self->depth)
				self->depth = level;
			self->splits += 1;
		} else {
			/* interior node: descend */
			level += 1;
			off = v;
		}
	}

	/* unreachable for valid 20-byte nodes */
	return -1;
}
1457 1457
1458 1458 static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
1459 1459 {
1460 1460 Py_ssize_t rev;
1461 1461 const char *node;
1462 1462 Py_ssize_t length;
1463 1463 if (!PyArg_ParseTuple(args, "n", &rev))
1464 1464 return NULL;
1465 1465 length = index_length(self->nt.index);
1466 1466 if (rev < 0 || rev >= length) {
1467 1467 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1468 1468 return NULL;
1469 1469 }
1470 1470 node = index_node_existing(self->nt.index, rev);
1471 1471 if (nt_insert(&self->nt, node, (int)rev) == -1)
1472 1472 return NULL;
1473 1473 Py_RETURN_NONE;
1474 1474 }
1475 1475
1476 1476 static int nt_delete_node(nodetree *self, const char *node)
1477 1477 {
1478 1478 /* rev==-2 happens to get encoded as 0, which is interpreted as not set
1479 1479 */
1480 1480 return nt_insert(self, node, -2);
1481 1481 }
1482 1482
1483 1483 static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
1484 1484 {
1485 1485 /* Initialize before overflow-checking to avoid nt_dealloc() crash. */
1486 1486 self->nodes = NULL;
1487 1487
1488 1488 self->index = index;
1489 1489 /* The input capacity is in terms of revisions, while the field is in
1490 1490 * terms of nodetree nodes. */
1491 1491 self->capacity = (capacity < 4 ? 4 : capacity / 2);
1492 1492 self->depth = 0;
1493 1493 self->splits = 0;
1494 1494 if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
1495 1495 PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
1496 1496 return -1;
1497 1497 }
1498 1498 self->nodes = calloc(self->capacity, sizeof(nodetreenode));
1499 1499 if (self->nodes == NULL) {
1500 1500 PyErr_NoMemory();
1501 1501 return -1;
1502 1502 }
1503 1503 self->length = 1;
1504 1504 return 0;
1505 1505 }
1506 1506
1507 1507 static PyTypeObject indexType;
1508 1508
1509 1509 static int ntobj_init(nodetreeObject *self, PyObject *args)
1510 1510 {
1511 1511 PyObject *index;
1512 1512 unsigned capacity;
1513 1513 if (!PyArg_ParseTuple(args, "O!I", &indexType, &index, &capacity))
1514 1514 return -1;
1515 1515 Py_INCREF(index);
1516 1516 return nt_init(&self->nt, (indexObject *)index, capacity);
1517 1517 }
1518 1518
1519 1519 static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
1520 1520 {
1521 1521 return nt_find(self, node, nodelen, 1);
1522 1522 }
1523 1523
/*
 * Find the length of the shortest unique prefix of node.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: length of shortest prefix
 */
static int nt_shortest(nodetree *self, const char *node)
{
	int level, off;

	for (level = off = 0; level < 40; level++) {
		int k, v;
		nodetreenode *n = &self->nodes[off];
		k = nt_level(node, level);
		v = n->children[k];
		if (v < 0) {
			/* leaf: the prefix is unique at this depth */
			const char *n;
			v = -(v + 2);
			n = index_node_existing(self->index, v);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, 20) != 0)
				/*
				 * Found a unique prefix, but it wasn't for the
				 * requested node (i.e the requested node does
				 * not exist).
				 */
				return -2;
			return level + 1;
		}
		if (v == 0)
			return -2;
		/* interior node: descend */
		off = v;
	}
	/*
	 * The node was still not unique after 40 hex digits, so this won't
	 * happen. Also, if we get here, then there's a programming error in
	 * this file that made us insert a node longer than 40 hex digits.
	 */
	PyErr_SetString(PyExc_Exception, "broken node tree");
	return -3;
}
1569 1569
1570 1570 static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
1571 1571 {
1572 1572 PyObject *val;
1573 1573 char *node;
1574 1574 int length;
1575 1575
1576 1576 if (!PyArg_ParseTuple(args, "O", &val))
1577 1577 return NULL;
1578 1578 if (node_check(val, &node) == -1)
1579 1579 return NULL;
1580 1580
1581 1581 length = nt_shortest(&self->nt, node);
1582 1582 if (length == -3)
1583 1583 return NULL;
1584 1584 if (length == -2) {
1585 1585 raise_revlog_error();
1586 1586 return NULL;
1587 1587 }
1588 1588 return PyInt_FromLong(length);
1589 1589 }
1590 1590
1591 1591 static void nt_dealloc(nodetree *self)
1592 1592 {
1593 1593 free(self->nodes);
1594 1594 self->nodes = NULL;
1595 1595 }
1596 1596
1597 1597 static void ntobj_dealloc(nodetreeObject *self)
1598 1598 {
1599 1599 Py_XDECREF(self->nt.index);
1600 1600 nt_dealloc(&self->nt);
1601 1601 PyObject_Del(self);
1602 1602 }
1603 1603
/* Methods exposed on the Python-level `parsers.nodetree` type. */
static PyMethodDef ntobj_methods[] = {
    {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
     "insert an index entry"},
    {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
     "find length of shortest hex nodeid of a binary ID"},
    {NULL} /* Sentinel */
};
1611 1611
/* Type object backing the stand-alone `parsers.nodetree` Python type. */
static PyTypeObject nodetreeType = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.nodetree",            /* tp_name */
    sizeof(nodetreeObject),        /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)ntobj_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    0,                             /* tp_as_sequence */
    0,                             /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "nodetree",                    /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    ntobj_methods,                 /* tp_methods */
    0,                             /* tp_members */
    0,                             /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)ntobj_init,          /* tp_init */
    0,                             /* tp_alloc */
};
1651 1651
1652 1652 static int index_init_nt(indexObject *self)
1653 1653 {
1654 1654 if (!self->ntinitialized) {
1655 1655 if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
1656 1656 nt_dealloc(&self->nt);
1657 1657 return -1;
1658 1658 }
1659 1659 if (nt_insert(&self->nt, nullid, -1) == -1) {
1660 1660 nt_dealloc(&self->nt);
1661 1661 return -1;
1662 1662 }
1663 1663 self->ntinitialized = 1;
1664 1664 self->ntrev = (int)index_length(self);
1665 1665 self->ntlookups = 1;
1666 1666 self->ntmisses = 0;
1667 1667 }
1668 1668 return 0;
1669 1669 }
1670 1670
/*
 * Find the revision for a binary node (or node prefix of `nodelen`
 * bytes), consulting and lazily filling the trie cache.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: valid rev
 */
static int index_find_node(indexObject *self, const char *node,
                           Py_ssize_t nodelen)
{
	int rev;

	if (index_init_nt(self) == -1)
		return -3;

	self->ntlookups++;
	rev = nt_find(&self->nt, node, nodelen, 0);
	if (rev >= -1)
		return rev;

	/*
	 * For the first handful of lookups, we scan the entire index,
	 * and cache only the matching nodes. This optimizes for cases
	 * like "hg tip", where only a few nodes are accessed.
	 *
	 * After that, we cache every node we visit, using a single
	 * scan amortized over multiple lookups. This gives the best
	 * bulk performance, e.g. for "hg log".
	 */
	if (self->ntmisses++ < 4) {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				if (nt_insert(&self->nt, n, rev) == -1)
					return -3;
				break;
			}
		}
	} else {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (nt_insert(&self->nt, n, rev) == -1) {
				/* remember how far we got before failing */
				self->ntrev = rev + 1;
				return -3;
			}
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				break;
			}
		}
		/* everything >= rev is now cached in the trie */
		self->ntrev = rev;
	}

	if (rev >= 0)
		return rev;
	return -2;
}
1731 1731
1732 1732 static PyObject *index_getitem(indexObject *self, PyObject *value)
1733 1733 {
1734 1734 char *node;
1735 1735 int rev;
1736 1736
1737 1737 if (PyInt_Check(value)) {
1738 1738 long idx;
1739 1739 if (!pylong_to_long(value, &idx)) {
1740 1740 return NULL;
1741 1741 }
1742 1742 return index_get(self, idx);
1743 1743 }
1744 1744
1745 1745 if (node_check(value, &node) == -1)
1746 1746 return NULL;
1747 1747 rev = index_find_node(self, node, 20);
1748 1748 if (rev >= -1)
1749 1749 return PyInt_FromLong(rev);
1750 1750 if (rev == -2)
1751 1751 raise_revlog_error();
1752 1752 return NULL;
1753 1753 }
1754 1754
1755 1755 /*
1756 1756 * Fully populate the radix tree.
1757 1757 */
1758 1758 static int index_populate_nt(indexObject *self)
1759 1759 {
1760 1760 int rev;
1761 1761 if (self->ntrev > 0) {
1762 1762 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1763 1763 const char *n = index_node_existing(self, rev);
1764 1764 if (n == NULL)
1765 1765 return -1;
1766 1766 if (nt_insert(&self->nt, n, rev) == -1)
1767 1767 return -1;
1768 1768 }
1769 1769 self->ntrev = -1;
1770 1770 }
1771 1771 return 0;
1772 1772 }
1773 1773
/*
 * partialmatch(hexprefix) -> 20-byte node, None, or raises
 *
 * Resolve a hex node prefix to the full binary node.  Returns None when
 * nothing matches (or the input is not hex); raises ValueError for an
 * empty or over-long key and RevlogError for an ambiguous prefix.
 */
static PyObject *index_partialmatch(indexObject *self, PyObject *args)
{
	const char *fullnode;
	int nodelen;
	char *node;
	int rev, i;

	if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
		return NULL;

	if (nodelen < 1) {
		PyErr_SetString(PyExc_ValueError, "key too short");
		return NULL;
	}

	if (nodelen > 40) {
		PyErr_SetString(PyExc_ValueError, "key too long");
		return NULL;
	}

	/* hexdigit() sets an exception on invalid characters */
	for (i = 0; i < nodelen; i++)
		hexdigit(node, i);
	if (PyErr_Occurred()) {
		/* input contains non-hex characters */
		PyErr_Clear();
		Py_RETURN_NONE;
	}

	/* the whole tree is needed to prove a prefix unambiguous */
	if (index_init_nt(self) == -1)
		return NULL;
	if (index_populate_nt(self) == -1)
		return NULL;
	rev = nt_partialmatch(&self->nt, node, nodelen);

	switch (rev) {
	case -4: /* ambiguous prefix */
		raise_revlog_error();
		return NULL;
	case -2: /* not found */
		Py_RETURN_NONE;
	case -1: /* the null id */
		return PyBytes_FromStringAndSize(nullid, 20);
	}

	fullnode = index_node_existing(self, rev);
	if (fullnode == NULL) {
		return NULL;
	}
	return PyBytes_FromStringAndSize(fullnode, 20);
}
1824 1824
1825 1825 static PyObject *index_shortest(indexObject *self, PyObject *args)
1826 1826 {
1827 1827 PyObject *val;
1828 1828 char *node;
1829 1829 int length;
1830 1830
1831 1831 if (!PyArg_ParseTuple(args, "O", &val))
1832 1832 return NULL;
1833 1833 if (node_check(val, &node) == -1)
1834 1834 return NULL;
1835 1835
1836 1836 self->ntlookups++;
1837 1837 if (index_init_nt(self) == -1)
1838 1838 return NULL;
1839 1839 if (index_populate_nt(self) == -1)
1840 1840 return NULL;
1841 1841 length = nt_shortest(&self->nt, node);
1842 1842 if (length == -3)
1843 1843 return NULL;
1844 1844 if (length == -2) {
1845 1845 raise_revlog_error();
1846 1846 return NULL;
1847 1847 }
1848 1848 return PyInt_FromLong(length);
1849 1849 }
1850 1850
1851 1851 static PyObject *index_m_get(indexObject *self, PyObject *args)
1852 1852 {
1853 1853 PyObject *val;
1854 1854 char *node;
1855 1855 int rev;
1856 1856
1857 1857 if (!PyArg_ParseTuple(args, "O", &val))
1858 1858 return NULL;
1859 1859 if (node_check(val, &node) == -1)
1860 1860 return NULL;
1861 1861 rev = index_find_node(self, node, 20);
1862 1862 if (rev == -3)
1863 1863 return NULL;
1864 1864 if (rev == -2)
1865 1865 Py_RETURN_NONE;
1866 1866 return PyInt_FromLong(rev);
1867 1867 }
1868 1868
1869 1869 static int index_contains(indexObject *self, PyObject *value)
1870 1870 {
1871 1871 char *node;
1872 1872
1873 1873 if (PyInt_Check(value)) {
1874 1874 long rev;
1875 1875 if (!pylong_to_long(value, &rev)) {
1876 1876 return -1;
1877 1877 }
1878 1878 return rev >= -1 && rev < index_length(self);
1879 1879 }
1880 1880
1881 1881 if (node_check(value, &node) == -1)
1882 1882 return -1;
1883 1883
1884 1884 switch (index_find_node(self, node, 20)) {
1885 1885 case -3:
1886 1886 return -1;
1887 1887 case -2:
1888 1888 return 0;
1889 1889 default:
1890 1890 return 1;
1891 1891 }
1892 1892 }
1893 1893
typedef uint64_t bitmask;

/*
 * Given a disjoint set of revs, return all candidates for the
 * greatest common ancestor. In revset notation, this is the set
 * "heads(::a and ::b and ...)"
 */
static PyObject *find_gca_candidates(indexObject *self, const int *revs,
                                     int revcount)
{
	/* seen[v]: bitmask of which input revs have v as an ancestor.
	 * `allseen` means "ancestor of every input"; the `poison` bit
	 * marks revs dominated by an already-emitted answer. */
	const bitmask allseen = (1ull << revcount) - 1;
	const bitmask poison = 1ull << revcount;
	PyObject *gca = PyList_New(0);
	int i, v, interesting;
	int maxrev = -1;
	bitmask sp;
	bitmask *seen;

	if (gca == NULL)
		return PyErr_NoMemory();

	for (i = 0; i < revcount; i++) {
		if (revs[i] > maxrev)
			maxrev = revs[i];
	}

	seen = calloc(sizeof(*seen), maxrev + 1);
	if (seen == NULL) {
		Py_DECREF(gca);
		return PyErr_NoMemory();
	}

	for (i = 0; i < revcount; i++)
		seen[revs[i]] = 1ull << i;

	/* number of distinct live bitmasks still being propagated */
	interesting = revcount;

	/* walk revs from highest to lowest; parents always have lower
	 * revision numbers, so one pass suffices */
	for (v = maxrev; v >= 0 && interesting; v--) {
		bitmask sv = seen[v];
		int parents[2];

		if (!sv)
			continue;

		if (sv < poison) {
			interesting -= 1;
			if (sv == allseen) {
				/* v is a common ancestor candidate */
				PyObject *obj = PyInt_FromLong(v);
				if (obj == NULL)
					goto bail;
				if (PyList_Append(gca, obj) == -1) {
					Py_DECREF(obj);
					goto bail;
				}
				sv |= poison;
				for (i = 0; i < revcount; i++) {
					if (revs[i] == v)
						goto done;
				}
			}
		}
		if (index_get_parents(self, v, parents, maxrev) < 0)
			goto bail;

		for (i = 0; i < 2; i++) {
			int p = parents[i];
			if (p == -1)
				continue;
			sp = seen[p];
			if (sv < poison) {
				if (sp == 0) {
					seen[p] = sv;
					interesting++;
				} else if (sp != sv)
					seen[p] |= sv;
			} else {
				/* poison propagates and overrides */
				if (sp && sp < poison)
					interesting--;
				seen[p] = sv;
			}
		}
	}

done:
	free(seen);
	return gca;
bail:
	free(seen);
	Py_XDECREF(gca);
	return NULL;
}
1985 1985
/*
 * Given a disjoint set of revs, return the subset with the longest
 * path to the root.
 */
static PyObject *find_deepest(indexObject *self, PyObject *revs)
{
	const Py_ssize_t revcount = PyList_GET_SIZE(revs);
	static const Py_ssize_t capacity = 24;
	/* depth[v]: longest distance from v down to any input rev;
	 * seen[v]: bitset of input revs v descends to at that depth;
	 * interesting[b]: number of revs currently carrying bitset b */
	int *depth, *interesting = NULL;
	int i, j, v, ninteresting;
	PyObject *dict = NULL, *keys = NULL;
	long *seen = NULL;
	int maxrev = -1;
	long final;

	/* bitsets are stored in a long; cap the number of inputs */
	if (revcount > capacity) {
		PyErr_Format(PyExc_OverflowError,
		             "bitset size (%ld) > capacity (%ld)",
		             (long)revcount, (long)capacity);
		return NULL;
	}

	for (i = 0; i < revcount; i++) {
		int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
		if (n > maxrev)
			maxrev = n;
	}

	depth = calloc(sizeof(*depth), maxrev + 1);
	if (depth == NULL)
		return PyErr_NoMemory();

	seen = calloc(sizeof(*seen), maxrev + 1);
	if (seen == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
	if (interesting == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	if (PyList_Sort(revs) == -1)
		goto bail;

	for (i = 0; i < revcount; i++) {
		int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
		long b = 1l << i;
		depth[n] = 1;
		seen[n] = b;
		interesting[b] = 1;
	}

	/* invariant: ninteresting is the number of non-zero entries in
	 * interesting. */
	ninteresting = (int)revcount;

	for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
		int dv = depth[v];
		int parents[2];
		long sv;

		if (dv == 0)
			continue;

		sv = seen[v];
		if (index_get_parents(self, v, parents, maxrev) < 0)
			goto bail;

		for (i = 0; i < 2; i++) {
			int p = parents[i];
			long sp;
			int dp;

			if (p == -1)
				continue;

			dp = depth[p];
			sp = seen[p];
			if (dp <= dv) {
				/* v provides a deeper path to p: adopt
				 * v's depth and bitset */
				depth[p] = dv + 1;
				if (sp != sv) {
					interesting[sv] += 1;
					seen[p] = sv;
					if (sp) {
						interesting[sp] -= 1;
						if (interesting[sp] == 0)
							ninteresting -= 1;
					}
				}
			} else if (dv == dp - 1) {
				/* equal depth through v: merge bitsets */
				long nsp = sp | sv;
				if (nsp == sp)
					continue;
				seen[p] = nsp;
				interesting[sp] -= 1;
				if (interesting[sp] == 0)
					ninteresting -= 1;
				if (interesting[nsp] == 0)
					ninteresting += 1;
				interesting[nsp] += 1;
			}
		}
		interesting[sv] -= 1;
		if (interesting[sv] == 0)
			ninteresting -= 1;
	}

	/* union the surviving bitsets into `final` */
	final = 0;
	j = ninteresting;
	/* NOTE(review): the bound is 2 << revcount while `interesting` has
	 * only 1 << revcount entries; the `j > 0` guard appears to stop the
	 * scan before any out-of-range index in practice — confirm. */
	for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
		if (interesting[i] == 0)
			continue;
		final |= i;
		j -= 1;
	}
	if (final == 0) {
		keys = PyList_New(0);
		goto bail;
	}

	dict = PyDict_New();
	if (dict == NULL)
		goto bail;

	/* use a dict as a set of the selected input revs */
	for (i = 0; i < revcount; i++) {
		PyObject *key;

		if ((final & (1 << i)) == 0)
			continue;

		key = PyList_GET_ITEM(revs, i);
		Py_INCREF(key);
		Py_INCREF(Py_None);
		if (PyDict_SetItem(dict, key, Py_None) == -1) {
			Py_DECREF(key);
			Py_DECREF(Py_None);
			goto bail;
		}
	}

	keys = PyDict_Keys(dict);

bail:
	free(depth);
	free(seen);
	free(interesting);
	Py_XDECREF(dict);

	return keys;
}
2139 2139
/*
 * Given a (possibly overlapping) set of revs, return all the
 * common ancestors heads: heads(::args[0] and ::a[1] and ...)
 *
 * Arguments must all be ints (revision numbers). Returns a new list
 * on success, or NULL with an exception set on failure. At most 24
 * distinct revisions are supported (bitmask-based GCA algorithm).
 */
static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
{
	PyObject *ret = NULL;
	Py_ssize_t argcount, i, len;
	bitmask repeat = 0; /* cheap duplicate pre-filter, see below */
	int revcount = 0;   /* number of distinct revs collected */
	int *revs;

	argcount = PySequence_Length(args);
	revs = PyMem_Malloc(argcount * sizeof(*revs));
	if (argcount > 0 && revs == NULL)
		return PyErr_NoMemory();
	len = index_length(self);

	for (i = 0; i < argcount; i++) {
		/* hard limit of the bitmask-based candidate search */
		static const int capacity = 24;
		PyObject *obj = PySequence_GetItem(args, i);
		bitmask x;
		long val;

		if (!PyInt_Check(obj)) {
			PyErr_SetString(PyExc_TypeError,
			                "arguments must all be ints");
			Py_DECREF(obj);
			goto bail;
		}
		val = PyInt_AsLong(obj);
		Py_DECREF(obj);
		/* rev -1 is nullrev: the only common ancestor is null,
		 * so the set of heads is empty */
		if (val == -1) {
			ret = PyList_New(0);
			goto done;
		}
		if (val < 0 || val >= len) {
			PyErr_SetString(PyExc_IndexError, "index out of range");
			goto bail;
		}
		/* this cheesy bloom filter lets us avoid some more
		 * expensive duplicate checks in the common set-is-disjoint
		 * case */
		x = 1ull << (val & 0x3f);
		if (repeat & x) {
			/* possible duplicate: do the exact (linear) check */
			int k;
			for (k = 0; k < revcount; k++) {
				if (val == revs[k])
					goto duplicate;
			}
		} else
			repeat |= x;
		if (revcount >= capacity) {
			PyErr_Format(PyExc_OverflowError,
			             "bitset size (%d) > capacity (%d)",
			             revcount, capacity);
			goto bail;
		}
		revs[revcount++] = (int)val;
	duplicate:;
	}

	/* trivial cases: empty input, or a single rev (its own head) */
	if (revcount == 0) {
		ret = PyList_New(0);
		goto done;
	}
	if (revcount == 1) {
		PyObject *obj;
		ret = PyList_New(1);
		if (ret == NULL)
			goto bail;
		obj = PyInt_FromLong(revs[0]);
		if (obj == NULL)
			goto bail;
		PyList_SET_ITEM(ret, 0, obj);
		goto done;
	}

	ret = find_gca_candidates(self, revs, revcount);
	if (ret == NULL)
		goto bail;

done:
	PyMem_Free(revs);
	return ret;

bail:
	PyMem_Free(revs);
	Py_XDECREF(ret);
	return NULL;
}
2231 2231
2232 2232 /*
2233 2233 * Given a (possibly overlapping) set of revs, return the greatest
2234 2234 * common ancestors: those with the longest path to the root.
2235 2235 */
2236 2236 static PyObject *index_ancestors(indexObject *self, PyObject *args)
2237 2237 {
2238 2238 PyObject *ret;
2239 2239 PyObject *gca = index_commonancestorsheads(self, args);
2240 2240 if (gca == NULL)
2241 2241 return NULL;
2242 2242
2243 2243 if (PyList_GET_SIZE(gca) <= 1) {
2244 2244 return gca;
2245 2245 }
2246 2246
2247 2247 ret = find_deepest(self, gca);
2248 2248 Py_DECREF(gca);
2249 2249 return ret;
2250 2250 }
2251 2251
2252 2252 /*
2253 2253 * Invalidate any trie entries introduced by added revs.
2254 2254 */
2255 2255 static void index_invalidate_added(indexObject *self, Py_ssize_t start)
2256 2256 {
2257 2257 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
2258 2258
2259 2259 for (i = start; i < len; i++) {
2260 2260 PyObject *tuple = PyList_GET_ITEM(self->added, i);
2261 2261 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
2262 2262
2263 2263 nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
2264 2264 }
2265 2265
2266 2266 if (start == 0)
2267 2267 Py_CLEAR(self->added);
2268 2268 }
2269 2269
/*
 * Delete a numeric range of revs, which must be at the end of the
 * range, but exclude the sentinel nullid entry.
 *
 * Returns 0 on success, -1 with an exception set on failure.
 */
static int index_slice_del(indexObject *self, PyObject *item)
{
	Py_ssize_t start, stop, step, slicelength;
	/* +1 accounts for the virtual nullid entry past the real revs */
	Py_ssize_t length = index_length(self) + 1;
	int ret = 0;

/* Argument changed from PySliceObject* to PyObject* in Python 3. */
#ifdef IS_PY3K
	if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
	                         &slicelength) < 0)
#else
	if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
	                         &step, &slicelength) < 0)
#endif
		return -1;

	/* empty slice: nothing to delete */
	if (slicelength <= 0)
		return 0;

	if ((step < 0 && start < stop) || (step > 0 && start > stop))
		stop = start;

	/* normalize a reversed slice into an equivalent forward one */
	if (step < 0) {
		stop = start + 1;
		start = stop + step * (slicelength - 1) - 1;
		step = -step;
	}

	if (step != 1) {
		PyErr_SetString(PyExc_ValueError,
		                "revlog index delete requires step size of 1");
		return -1;
	}

	/* only deletion of a trailing range is supported */
	if (stop != length - 1) {
		PyErr_SetString(PyExc_IndexError,
		                "revlog index deletion indices are invalid");
		return -1;
	}

	if (start < self->length) {
		/* truncation reaches into the on-disk portion of the index */
		if (self->ntinitialized) {
			Py_ssize_t i;

			/* drop every truncated node from the trie */
			for (i = start + 1; i < self->length; i++) {
				const char *node = index_node_existing(self, i);
				if (node == NULL)
					return -1;

				nt_delete_node(&self->nt, node);
			}
			if (self->added)
				index_invalidate_added(self, 0);
			if (self->ntrev > start)
				self->ntrev = (int)start;
		}
		self->length = start;
		if (start < self->raw_length) {
			/* invalidate cached entries past the new end */
			if (self->cache) {
				Py_ssize_t i;
				for (i = start; i < self->raw_length; i++)
					Py_CLEAR(self->cache[i]);
			}
			self->raw_length = start;
		}
		goto done;
	}

	/* truncation only affects in-memory (appended) entries */
	if (self->ntinitialized) {
		index_invalidate_added(self, start - self->length);
		if (self->ntrev > start)
			self->ntrev = (int)start;
	}
	if (self->added)
		ret = PyList_SetSlice(self->added, start - self->length,
		                      PyList_GET_SIZE(self->added), NULL);
done:
	/* head revisions may have changed in any case */
	Py_CLEAR(self->headrevs);
	return ret;
}
2354 2354
/*
 * Supported ops:
 *
 * slice deletion
 * string assignment (extend node->rev mapping)
 * string deletion (shrink node->rev mapping)
 *
 * Returns 0 on success, -1 with an exception set on failure.
 */
static int index_assign_subscript(indexObject *self, PyObject *item,
                                  PyObject *value)
{
	char *node;
	long rev;

	/* del index[i:j] -> truncate the index */
	if (PySlice_Check(item) && value == NULL)
		return index_slice_del(self, item);

	/* every other op is keyed by a binary node */
	if (node_check(item, &node) == -1)
		return -1;

	if (value == NULL)
		/* del index[node]: drop it from the trie, if one exists */
		return self->ntinitialized ? nt_delete_node(&self->nt, node)
		                           : 0;
	rev = PyInt_AsLong(value);
	if (rev > INT_MAX || rev < 0) {
		/* PyInt_AsLong returns -1 on conversion error, which the
		 * rev < 0 test also catches; only raise our own error if
		 * the conversion itself did not already set one */
		if (!PyErr_Occurred())
			PyErr_SetString(PyExc_ValueError, "rev out of range");
		return -1;
	}

	if (index_init_nt(self) == -1)
		return -1;
	return nt_insert(&self->nt, node, (int)rev);
}
2388 2388
/*
 * Find all RevlogNG entries in an index that has inline data. Update
 * the optional "offsets" table with those entries.
 *
 * Returns the number of records found, or -1 (with an exception set)
 * if the buffer does not end exactly on a record boundary.
 */
static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
{
	const char *data = (const char *)self->buf.buf;
	Py_ssize_t pos = 0;
	Py_ssize_t end = self->buf.len;
	long incr = v1_hdrsize;
	Py_ssize_t len = 0;

	/* walk record by record: each is a fixed-size header followed by
	 * its variable-length compressed inline data */
	while (pos + v1_hdrsize <= end && pos >= 0) {
		uint32_t comp_len;
		/* 3rd element of header is length of compressed inline data */
		comp_len = getbe32(data + pos + 8);
		incr = v1_hdrsize + comp_len;
		if (offsets)
			offsets[len] = data + pos;
		len++;
		pos += incr;
	}

	/* a well-formed inline index ends exactly at the buffer's end */
	if (pos != end) {
		if (!PyErr_Occurred())
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
		return -1;
	}

	return len;
}
2420 2420
/*
 * Initialize an index object from its (data, inlined) constructor
 * arguments. Returns 0 on success, -1 with an exception set otherwise.
 */
static int index_init(indexObject *self, PyObject *args)
{
	PyObject *data_obj, *inlined_obj;
	Py_ssize_t size;

	/* Initialize before argument-checking to avoid index_dealloc() crash.
	 */
	self->raw_length = 0;
	self->added = NULL;
	self->cache = NULL;
	self->data = NULL;
	memset(&self->buf, 0, sizeof(self->buf));
	self->headrevs = NULL;
	self->filteredrevs = Py_None;
	Py_INCREF(Py_None);
	self->ntinitialized = 0;
	self->offsets = NULL;

	if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
		return -1;
	if (!PyObject_CheckBuffer(data_obj)) {
		PyErr_SetString(PyExc_TypeError,
		                "data does not support buffer interface");
		return -1;
	}

	/* obtain a view of the raw index bytes; released in dealloc */
	if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
		return -1;
	size = self->buf.len;

	self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
	self->data = data_obj;

	self->ntlookups = self->ntmisses = 0;
	self->ntrev = -1;
	Py_INCREF(self->data);

	if (self->inlined) {
		/* inline data: records vary in size, scan to count them */
		Py_ssize_t len = inline_scan(self, NULL);
		if (len == -1)
			goto bail;
		self->raw_length = len;
		self->length = len;
	} else {
		/* separate data file: the index is fixed-size records */
		if (size % v1_hdrsize) {
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
			goto bail;
		}
		self->raw_length = size / v1_hdrsize;
		self->length = self->raw_length;
	}

	return 0;
bail:
	return -1;
}
2477 2477
2478 2478 static PyObject *index_nodemap(indexObject *self)
2479 2479 {
2480 2480 Py_INCREF(self);
2481 2481 return (PyObject *)self;
2482 2482 }
2483 2483
2484 2484 static void _index_clearcaches(indexObject *self)
2485 2485 {
2486 2486 if (self->cache) {
2487 2487 Py_ssize_t i;
2488 2488
2489 2489 for (i = 0; i < self->raw_length; i++)
2490 2490 Py_CLEAR(self->cache[i]);
2491 2491 free(self->cache);
2492 2492 self->cache = NULL;
2493 2493 }
2494 2494 if (self->offsets) {
2495 2495 PyMem_Free((void *)self->offsets);
2496 2496 self->offsets = NULL;
2497 2497 }
2498 2498 if (self->ntinitialized) {
2499 2499 nt_dealloc(&self->nt);
2500 2500 }
2501 2501 self->ntinitialized = 0;
2502 2502 Py_CLEAR(self->headrevs);
2503 2503 }
2504 2504
2505 2505 static PyObject *index_clearcaches(indexObject *self)
2506 2506 {
2507 2507 _index_clearcaches(self);
2508 2508 self->ntrev = -1;
2509 2509 self->ntlookups = self->ntmisses = 0;
2510 2510 Py_RETURN_NONE;
2511 2511 }
2512 2512
/* Tear down an index object; safe even after a failed index_init(). */
static void index_dealloc(indexObject *self)
{
	/* drop caches first: they reference data released below */
	_index_clearcaches(self);
	Py_XDECREF(self->filteredrevs);
	if (self->buf.buf) {
		PyBuffer_Release(&self->buf);
		memset(&self->buf, 0, sizeof(self->buf));
	}
	Py_XDECREF(self->data);
	Py_XDECREF(self->added);
	PyObject_Del(self);
}
2525 2525
/* sequence protocol: len(index), index[rev], and "node in index" */
static PySequenceMethods index_sequence_methods = {
    (lenfunc)index_length,      /* sq_length */
    0,                          /* sq_concat */
    0,                          /* sq_repeat */
    (ssizeargfunc)index_get,    /* sq_item */
    0,                          /* sq_slice */
    0,                          /* sq_ass_item */
    0,                          /* sq_ass_slice */
    (objobjproc)index_contains, /* sq_contains */
};
2536 2536
/* mapping protocol: node lookup, assignment, and slice deletion */
static PyMappingMethods index_mapping_methods = {
    (lenfunc)index_length,                 /* mp_length */
    (binaryfunc)index_getitem,             /* mp_subscript */
    (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
};
2542 2542
/* Python-visible methods of the index type. */
static PyMethodDef index_methods[] = {
    {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
     "return the gca set of the given revs"},
    {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
     METH_VARARGS,
     "return the heads of the common ancestors of the given revs"},
    {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
     "clear the index caches"},
    {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
    {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
     "compute phases"},
    {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
     "reachableroots"},
    {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
     "get head revisions"}, /* Can do filtering since 3.2 */
    {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
     "get filtered head revisions"}, /* Can always do filtering */
    {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
     "determine revisions with deltas to reconstruct fulltext"},
    /* NOTE(review): doc string below duplicates "deltachain"'s —
     * presumably a copy-paste; confirm the intended description */
    {"slicechunktodensity", (PyCFunction)index_slicechunktodensity,
     METH_VARARGS, "determine revisions with deltas to reconstruct fulltext"},
    {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
    {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
     "match a potentially ambiguous node ID"},
    {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
     "find length of shortest hex nodeid of a binary ID"},
    {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
    {NULL} /* Sentinel */
};
2572 2572
/* attributes: "nodemap" exposes the index itself as a node->rev map */
static PyGetSetDef index_getset[] = {
    {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
    {NULL} /* Sentinel */
};
2577 2577
/* type object for parsers.index; tp_new is filled in at module init */
static PyTypeObject indexType = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.index",               /* tp_name */
    sizeof(indexObject),           /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)index_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    &index_sequence_methods,       /* tp_as_sequence */
    &index_mapping_methods,        /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "revlog index",                /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    index_methods,                 /* tp_methods */
    0,                             /* tp_members */
    index_getset,                  /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)index_init,          /* tp_init */
    0,                             /* tp_alloc */
};
2617 2617
/*
 * returns a tuple of the form (index, index, cache) with elements as
 * follows:
 *
 * index: an index object that lazily parses RevlogNG records
 * cache: if data is inlined, a tuple (0, index_file_content), else None
 *   index_file_content could be a string, or a buffer
 *
 * added complications are for backwards compatibility
 */
PyObject *parse_index2(PyObject *self, PyObject *args)
{
	PyObject *tuple = NULL, *cache = NULL;
	indexObject *idx;
	int ret;

	idx = PyObject_New(indexObject, &indexType);
	if (idx == NULL)
		goto bail;

	/* index_init performs its own cleanup-safe initialization */
	ret = index_init(idx, args);
	if (ret == -1)
		goto bail;

	if (idx->inlined) {
		cache = Py_BuildValue("iO", 0, idx->data);
		if (cache == NULL)
			goto bail;
	} else {
		cache = Py_None;
		Py_INCREF(cache);
	}

	/* "N" transfers ownership of idx and cache into the tuple.
	 * NOTE(review): if Py_BuildValue itself fails, the bail path's
	 * Py_XDECREF of idx/cache may over-release references already
	 * consumed by "N" — verify against the CPython docs for "N". */
	tuple = Py_BuildValue("NN", idx, cache);
	if (!tuple)
		goto bail;
	return tuple;

bail:
	Py_XDECREF(idx);
	Py_XDECREF(cache);
	Py_XDECREF(tuple);
	return NULL;
}
2662 2662
2663 2663 #ifdef WITH_RUST
2664 2664
/* rustlazyancestors: iteration over ancestors implemented in Rust
 *
 * This class holds a reference to an index and to the Rust iterator.
 */
typedef struct rustlazyancestorsObjectStruct rustlazyancestorsObject;

struct rustlazyancestorsObjectStruct {
	PyObject_HEAD
	/* Type-specific fields go here. */
	indexObject *index; /* Ref kept to avoid GC'ing the index */
	void *iter;         /* Rust iterator; opaque handle created by
	                     * rustlazyancestors_init(), released with
	                     * rustlazyancestors_drop() */
};
2677 2677
/* FFI exposed from Rust code */
rustlazyancestorsObject *
rustlazyancestors_init(indexObject *index,
                       /* to pass index_get_parents() */
                       int (*)(indexObject *, Py_ssize_t, int *, int),
                       /* intrevs vector */
                       Py_ssize_t initrevslen, long *initrevs, long stoprev,
                       int inclusive);
void rustlazyancestors_drop(rustlazyancestorsObject *self);
/* -1 appears to signal exhaustion (see its use in rustla_next) */
int rustlazyancestors_next(rustlazyancestorsObject *self);
int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev);
2689 2689
2690 2690 /* CPython instance methods */
2691 2691 static int rustla_init(rustlazyancestorsObject *self, PyObject *args)
2692 2692 {
2693 2693 PyObject *initrevsarg = NULL;
2694 2694 PyObject *inclusivearg = NULL;
2695 2695 long stoprev = 0;
2696 2696 long *initrevs = NULL;
2697 2697 int inclusive = 0;
2698 2698 Py_ssize_t i;
2699 2699
2700 2700 indexObject *index;
2701 2701 if (!PyArg_ParseTuple(args, "O!O!lO!", &indexType, &index, &PyList_Type,
2702 2702 &initrevsarg, &stoprev, &PyBool_Type,
2703 2703 &inclusivearg))
2704 2704 return -1;
2705 2705
2706 2706 Py_INCREF(index);
2707 2707 self->index = index;
2708 2708
2709 2709 if (inclusivearg == Py_True)
2710 2710 inclusive = 1;
2711 2711
2712 2712 Py_ssize_t linit = PyList_GET_SIZE(initrevsarg);
2713 2713
2714 2714 initrevs = (long *)calloc(linit, sizeof(long));
2715 2715
2716 2716 if (initrevs == NULL) {
2717 2717 PyErr_NoMemory();
2718 2718 goto bail;
2719 2719 }
2720 2720
2721 2721 for (i = 0; i < linit; i++) {
2722 2722 initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i));
2723 2723 }
2724 2724 if (PyErr_Occurred())
2725 2725 goto bail;
2726 2726
2727 2727 self->iter = rustlazyancestors_init(index, index_get_parents, linit,
2728 2728 initrevs, stoprev, inclusive);
2729 2729 if (self->iter == NULL) {
2730 2730 /* if this is because of GraphError::ParentOutOfRange
2731 2731 * index_get_parents() has already set the proper ValueError */
2732 2732 goto bail;
2733 2733 }
2734 2734
2735 2735 free(initrevs);
2736 2736 return 0;
2737 2737
2738 2738 bail:
2739 2739 free(initrevs);
2740 2740 return -1;
2741 2741 };
2742 2742
/* Release the iterator's index reference and its Rust-side state. */
static void rustla_dealloc(rustlazyancestorsObject *self)
{
	Py_XDECREF(self->index);
	if (self->iter != NULL) { /* can happen if rustla_init failed */
		rustlazyancestors_drop(self->iter);
	}
	PyObject_Del(self);
}
2751 2751
2752 2752 static PyObject *rustla_next(rustlazyancestorsObject *self)
2753 2753 {
2754 2754 int res = rustlazyancestors_next(self->iter);
2755 2755 if (res == -1) {
2756 2756 /* Setting an explicit exception seems unnecessary
2757 2757 * as examples from Python source code (Objects/rangeobjets.c
2758 2758 * and Modules/_io/stringio.c) seem to demonstrate.
2759 2759 */
2760 2760 return NULL;
2761 2761 }
2762 2762 return PyInt_FromLong(res);
2763 2763 }
2764 2764
2765 2765 static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev)
2766 2766 {
2767 2767 long lrev;
2768 2768 if (!pylong_to_long(rev, &lrev)) {
2769 2769 PyErr_Clear();
2770 2770 return 0;
2771 2771 }
2772 2772 return rustlazyancestors_contains(self->iter, lrev);
2773 2773 }
2774 2774
/* sequence protocol: only "rev in iterator" is supported */
static PySequenceMethods rustla_sequence_methods = {
    0,                           /* sq_length */
    0,                           /* sq_concat */
    0,                           /* sq_repeat */
    0,                           /* sq_item */
    0,                           /* sq_slice */
    0,                           /* sq_ass_item */
    0,                           /* sq_ass_slice */
    (objobjproc)rustla_contains, /* sq_contains */
};
2785 2785
/* type object for parsers.rustlazyancestors; tp_new set at module init */
static PyTypeObject rustlazyancestorsType = {
    PyVarObject_HEAD_INIT(NULL, 0)                  /* header */
    "parsers.rustlazyancestors",                    /* tp_name */
    sizeof(rustlazyancestorsObject),                /* tp_basicsize */
    0,                                              /* tp_itemsize */
    (destructor)rustla_dealloc,                     /* tp_dealloc */
    0,                                              /* tp_print */
    0,                                              /* tp_getattr */
    0,                                              /* tp_setattr */
    0,                                              /* tp_compare */
    0,                                              /* tp_repr */
    0,                                              /* tp_as_number */
    &rustla_sequence_methods,                       /* tp_as_sequence */
    0,                                              /* tp_as_mapping */
    0,                                              /* tp_hash */
    0,                                              /* tp_call */
    0,                                              /* tp_str */
    0,                                              /* tp_getattro */
    0,                                              /* tp_setattro */
    0,                                              /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,                             /* tp_flags */
    "Iterator over ancestors, implemented in Rust", /* tp_doc */
    0,                                              /* tp_traverse */
    0,                                              /* tp_clear */
    0,                                              /* tp_richcompare */
    0,                                              /* tp_weaklistoffset */
    0,                                              /* tp_iter */
    (iternextfunc)rustla_next,                      /* tp_iternext */
    0,                                              /* tp_methods */
    0,                                              /* tp_members */
    0,                                              /* tp_getset */
    0,                                              /* tp_base */
    0,                                              /* tp_dict */
    0,                                              /* tp_descr_get */
    0,                                              /* tp_descr_set */
    0,                                              /* tp_dictoffset */
    (initproc)rustla_init,                          /* tp_init */
    0,                                              /* tp_alloc */
};
2825 2825 #endif /* WITH_RUST */
2826 2826
/* Register the revlog types and module-level constants on `mod`. */
void revlog_module_init(PyObject *mod)
{
	/* the C index type */
	indexType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&indexType) < 0)
		return;
	Py_INCREF(&indexType);
	PyModule_AddObject(mod, "index", (PyObject *)&indexType);

	/* the standalone node trie type */
	nodetreeType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&nodetreeType) < 0)
		return;
	Py_INCREF(&nodetreeType);
	PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);

	/* module-level singleton: the index entry for the null revision,
	 * built only once even if the module is initialized again */
	if (!nullentry) {
		nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0,
		                          0, -1, -1, -1, -1, nullid, 20);
	}
	if (nullentry)
		/* the singleton is immortal: exclude it from GC tracking */
		PyObject_GC_UnTrack(nullentry);

#ifdef WITH_RUST
	rustlazyancestorsType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&rustlazyancestorsType) < 0)
		return;
	Py_INCREF(&rustlazyancestorsType);
	PyModule_AddObject(mod, "rustlazyancestors",
	                   (PyObject *)&rustlazyancestorsType);
#endif
}
General Comments 0
You need to be logged in to leave comments. Login now