##// END OF EJS Templates
revlog: add a native implementation of issnapshot...
Boris Feld -
r41117:a6556b09 default
parent child Browse files
Show More
@@ -1,2912 +1,2946 b''
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #include <Python.h>
11 11 #include <assert.h>
12 12 #include <ctype.h>
13 13 #include <limits.h>
14 14 #include <stddef.h>
15 15 #include <stdlib.h>
16 16 #include <string.h>
17 17
18 18 #include "bitmanipulation.h"
19 19 #include "charencode.h"
20 20 #include "revlog.h"
21 21 #include "util.h"
22 22
23 23 #ifdef IS_PY3K
24 24 /* The mapping of Python types is meant to be temporary to get Python
25 25 * 3 to compile. We should remove this once Python 3 support is fully
26 26 * supported and proper types are used in the extensions themselves. */
27 27 #define PyInt_Check PyLong_Check
28 28 #define PyInt_FromLong PyLong_FromLong
29 29 #define PyInt_FromSsize_t PyLong_FromSsize_t
30 30 #define PyInt_AsLong PyLong_AsLong
31 31 #endif
32 32
33 33 typedef struct indexObjectStruct indexObject;
34 34
35 35 typedef struct {
36 36 int children[16];
37 37 } nodetreenode;
38 38
39 39 /*
40 40 * A base-16 trie for fast node->rev mapping.
41 41 *
42 42 * Positive value is index of the next node in the trie
43 43 * Negative value is a leaf: -(rev + 2)
44 44 * Zero is empty
45 45 */
46 46 typedef struct {
47 47 indexObject *index;
48 48 nodetreenode *nodes;
49 49 unsigned length; /* # nodes in use */
50 50 unsigned capacity; /* # nodes allocated */
51 51 int depth; /* maximum depth of tree */
52 52 int splits; /* # splits performed */
53 53 } nodetree;
54 54
55 55 typedef struct {
56 56 PyObject_HEAD /* ; */
57 57 nodetree nt;
58 58 } nodetreeObject;
59 59
60 60 /*
61 61 * This class has two behaviors.
62 62 *
63 63 * When used in a list-like way (with integer keys), we decode an
64 64 * entry in a RevlogNG index file on demand. Our last entry is a
65 65 * sentinel, always a nullid. We have limited support for
66 66 * integer-keyed insert and delete, only at elements right before the
67 67 * sentinel.
68 68 *
69 69 * With string keys, we lazily perform a reverse mapping from node to
70 70 * rev, using a base-16 trie.
71 71 */
72 72 struct indexObjectStruct {
73 73 PyObject_HEAD
74 74 /* Type-specific fields go here. */
75 75 PyObject *data; /* raw bytes of index */
76 76 Py_buffer buf; /* buffer of data */
77 77 PyObject **cache; /* cached tuples */
78 78 const char **offsets; /* populated on demand */
79 79 Py_ssize_t raw_length; /* original number of elements */
80 80 Py_ssize_t length; /* current number of elements */
81 81 PyObject *added; /* populated on demand */
82 82 PyObject *headrevs; /* cache, invalidated on changes */
83 83 PyObject *filteredrevs; /* filtered revs set */
84 84 nodetree nt; /* base-16 trie */
85 85 int ntinitialized; /* 0 or 1 */
86 86 int ntrev; /* last rev scanned */
87 87 int ntlookups; /* # lookups */
88 88 int ntmisses; /* # lookups that miss the cache */
89 89 int inlined;
90 90 };
91 91
92 92 static Py_ssize_t index_length(const indexObject *self)
93 93 {
94 94 if (self->added == NULL)
95 95 return self->length;
96 96 return self->length + PyList_GET_SIZE(self->added);
97 97 }
98 98
99 99 static PyObject *nullentry = NULL;
100 100 static const char nullid[20] = {0};
101 101 static const Py_ssize_t nullrev = -1;
102 102
103 103 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
104 104
105 105 #if LONG_MAX == 0x7fffffffL
106 106 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
107 107 #else
108 108 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
109 109 #endif
110 110
111 111 /* A RevlogNG v1 index entry is 64 bytes long. */
112 112 static const long v1_hdrsize = 64;
113 113
/*
 * Set a pending mercurial.error.RevlogError exception.
 *
 * The exception class is looked up dynamically so this C module has no
 * link-time dependency on the pure-Python error module.  If the lookup
 * fails, a SystemError (or the import error) is left set instead.  The
 * exception value "RevlogError" is ignored by callers.
 */
static void raise_revlog_error(void)
{
	PyObject *mod = NULL, *dict = NULL, *errclass = NULL;

	mod = PyImport_ImportModule("mercurial.error");
	if (mod == NULL) {
		goto cleanup;
	}

	dict = PyModule_GetDict(mod);
	if (dict == NULL) {
		goto cleanup;
	}
	/* PyModule_GetDict returns a borrowed reference; take our own so
	 * the cleanup path can unconditionally Py_XDECREF it. */
	Py_INCREF(dict);

	/* PyDict_GetItemString also returns a borrowed reference, kept
	 * alive by `dict`; no extra INCREF/DECREF needed for errclass. */
	errclass = PyDict_GetItemString(dict, "RevlogError");
	if (errclass == NULL) {
		PyErr_SetString(PyExc_SystemError,
				"could not find RevlogError");
		goto cleanup;
	}

	/* value of exception is ignored by callers */
	PyErr_SetString(errclass, "RevlogError");

cleanup:
	Py_XDECREF(dict);
	Py_XDECREF(mod);
}
143 143
/*
 * Return a pointer to the beginning of a RevlogNG record.
 *
 * For an inlined revlog the records are interleaved with data and are
 * not a fixed stride apart, so a per-revision offset table is built
 * lazily on first access and cached in self->offsets.  Returns NULL
 * with MemoryError set if that table cannot be allocated.
 */
static const char *index_deref(indexObject *self, Py_ssize_t pos)
{
	if (self->inlined && pos > 0) {
		if (self->offsets == NULL) {
			self->offsets = PyMem_Malloc(self->raw_length *
						     sizeof(*self->offsets));
			if (self->offsets == NULL)
				return (const char *)PyErr_NoMemory();
			inline_scan(self, self->offsets);
		}
		return self->offsets[pos];
	}

	/* Non-inlined case (and rev 0): fixed-size records, so the
	 * record address is a simple multiple of the v1 header size. */
	return (const char *)(self->buf.buf) + pos * v1_hdrsize;
}
162 162
/*
 * Get parents of the given rev.
 *
 * The specified rev must be valid and must not be nullrev. A returned
 * parent revision may be nullrev, but is guaranteed to be in valid range.
 * Stores the two parents into ps[0]/ps[1]; returns 0 on success, -1 with
 * an exception set on failure.
 */
static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
				    int maxrev)
{
	if (rev >= self->length) {
		/* In-memory revision: stored as a Python tuple in
		 * self->added; the parents are tuple items 5 and 6. */
		long tmp;
		PyObject *tuple =
		    PyList_GET_ITEM(self->added, rev - self->length);
		if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 5), &tmp)) {
			return -1;
		}
		ps[0] = (int)tmp;
		if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 6), &tmp)) {
			return -1;
		}
		ps[1] = (int)tmp;
	} else {
		/* On-disk record: big-endian 32-bit parent revisions at
		 * byte offsets 24 and 28. */
		const char *data = index_deref(self, rev);
		ps[0] = getbe32(data + 24);
		ps[1] = getbe32(data + 28);
	}
	/* If index file is corrupted, ps[] may point to invalid revisions. So
	 * there is a risk of buffer overflow to trust them unconditionally. */
	if (ps[0] < -1 || ps[0] > maxrev || ps[1] < -1 || ps[1] > maxrev) {
		PyErr_SetString(PyExc_ValueError, "parent out of range");
		return -1;
	}
	return 0;
}
197 197
198 198 /*
199 199 * Get parents of the given rev.
200 200 *
201 201 * If the specified rev is out of range, IndexError will be raised. If the
202 202 * revlog entry is corrupted, ValueError may be raised.
203 203 *
204 204 * Returns 0 on success or -1 on failure.
205 205 */
206 206 int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps)
207 207 {
208 208 int tiprev;
209 209 if (!op || !HgRevlogIndex_Check(op) || !ps) {
210 210 PyErr_BadInternalCall();
211 211 return -1;
212 212 }
213 213 tiprev = (int)index_length((indexObject *)op) - 1;
214 214 if (rev < -1 || rev > tiprev) {
215 215 PyErr_Format(PyExc_IndexError, "rev out of range: %d", rev);
216 216 return -1;
217 217 } else if (rev == -1) {
218 218 ps[0] = ps[1] = -1;
219 219 return 0;
220 220 } else {
221 221 return index_get_parents((indexObject *)op, rev, ps, tiprev);
222 222 }
223 223 }
224 224
/*
 * Return the byte offset in the revlog data at which the given rev's
 * chunk starts, or -1 with an exception set on error.
 *
 * The stored 48-bit value packs 16 flag bits in its low bits; shifting
 * right by 16 recovers the pure offset.
 */
static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
{
	uint64_t offset;
	if (rev == nullrev) {
		return 0;
	}
	if (rev >= self->length) {
		/* In-memory revision: item 0 of the tuple holds the
		 * packed offset+flags value. */
		PyObject *tuple;
		PyObject *pylong;
		PY_LONG_LONG tmp;
		tuple = PyList_GET_ITEM(self->added, rev - self->length);
		pylong = PyTuple_GET_ITEM(tuple, 0);
		tmp = PyLong_AsLongLong(pylong);
		if (tmp == -1 && PyErr_Occurred()) {
			return -1;
		}
		if (tmp < 0) {
			PyErr_Format(PyExc_OverflowError,
				     "revlog entry size out of bound (%lld)",
				     (long long)tmp);
			return -1;
		}
		offset = (uint64_t)tmp;
	} else {
		const char *data = index_deref(self, rev);
		offset = getbe32(data + 4);
		if (rev == 0) {
			/* mask out version number for the first entry */
			offset &= 0xFFFF;
		} else {
			/* high 32 bits of the 48-bit offset live in the
			 * first 4 bytes of the record */
			uint32_t offset_high = getbe32(data);
			offset |= ((uint64_t)offset_high) << 32;
		}
	}
	/* drop the 16 flag bits to obtain the real start offset */
	return (int64_t)(offset >> 16);
}
261 261
/*
 * Return the compressed length of the given rev's chunk, or -1 with an
 * exception set on error.  Rejects values that do not fit in an int as
 * a corrupted/overflowing entry.
 */
static inline int index_get_length(indexObject *self, Py_ssize_t rev)
{
	if (rev == nullrev) {
		return 0;
	}
	if (rev >= self->length) {
		/* In-memory revision: item 1 of the tuple is the
		 * compressed length. */
		PyObject *tuple;
		PyObject *pylong;
		long ret;
		tuple = PyList_GET_ITEM(self->added, rev - self->length);
		pylong = PyTuple_GET_ITEM(tuple, 1);
		ret = PyInt_AsLong(pylong);
		if (ret == -1 && PyErr_Occurred()) {
			return -1;
		}
		if (ret < 0 || ret > (long)INT_MAX) {
			PyErr_Format(PyExc_OverflowError,
				     "revlog entry size out of bound (%ld)",
				     ret);
			return -1;
		}
		return (int)ret;
	} else {
		/* On-disk record: big-endian 32-bit length at offset 8 */
		const char *data = index_deref(self, rev);
		int tmp = (int)getbe32(data + 8);
		if (tmp < 0) {
			/* negative after cast means the stored value
			 * exceeded INT_MAX: treat as corruption */
			PyErr_Format(PyExc_OverflowError,
				     "revlog entry size out of bound (%d)",
				     tmp);
			return -1;
		}
		return tmp;
	}
}
296 296
/*
 * RevlogNG format (all in big endian, data may be inlined):
 *    6 bytes: offset
 *    2 bytes: flags
 *    4 bytes: compressed length
 *    4 bytes: uncompressed length
 *    4 bytes: base revision
 *    4 bytes: link revision
 *    4 bytes: parent 1 revision
 *    4 bytes: parent 2 revision
 *   32 bytes: nodeid (only 20 bytes used)
 *
 * index_get decodes entry `pos` into a Python tuple, caching decoded
 * tuples so repeated lookups avoid re-parsing.  Returns a new
 * reference, or NULL with an exception set.
 */
static PyObject *index_get(indexObject *self, Py_ssize_t pos)
{
	uint64_t offset_flags;
	int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
	const char *c_node_id;
	const char *data;
	Py_ssize_t length = index_length(self);
	PyObject *entry;

	if (pos == nullrev) {
		/* shared singleton entry for the null revision */
		Py_INCREF(nullentry);
		return nullentry;
	}

	if (pos < 0 || pos >= length) {
		PyErr_SetString(PyExc_IndexError, "revlog index out of range");
		return NULL;
	}

	if (pos >= self->length) {
		/* in-memory revision: already a tuple, just return it */
		PyObject *obj;
		obj = PyList_GET_ITEM(self->added, pos - self->length);
		Py_INCREF(obj);
		return obj;
	}

	if (self->cache) {
		if (self->cache[pos]) {
			Py_INCREF(self->cache[pos]);
			return self->cache[pos];
		}
	} else {
		/* lazily allocate the tuple cache, one slot per on-disk
		 * revision, zero-initialized (= not yet decoded) */
		self->cache = calloc(self->raw_length, sizeof(PyObject *));
		if (self->cache == NULL)
			return PyErr_NoMemory();
	}

	data = index_deref(self, pos);
	if (data == NULL)
		return NULL;

	offset_flags = getbe32(data + 4);
	if (pos == 0) /* mask out version number for the first entry */
		offset_flags &= 0xFFFF;
	else {
		uint32_t offset_high = getbe32(data);
		offset_flags |= ((uint64_t)offset_high) << 32;
	}

	comp_len = getbe32(data + 8);
	uncomp_len = getbe32(data + 12);
	base_rev = getbe32(data + 16);
	link_rev = getbe32(data + 20);
	parent_1 = getbe32(data + 24);
	parent_2 = getbe32(data + 28);
	c_node_id = data + 32;

	entry = Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
			      base_rev, link_rev, parent_1, parent_2, c_node_id,
			      20);

	if (entry) {
		/* the extra INCREF is the cache's own reference; GC
		 * untracking is safe because the tuple holds no
		 * container objects */
		PyObject_GC_UnTrack(entry);
		Py_INCREF(entry);
	}

	self->cache[pos] = entry;

	return entry;
}
379 379
/*
 * Return the 20-byte SHA of the node corresponding to the given rev.
 *
 * Returns a pointer into index-owned storage (do not free), or NULL if
 * the rev is out of range or the entry cannot be accessed.  No Python
 * exception is set by this helper; see index_node_existing for that.
 */
static const char *index_node(indexObject *self, Py_ssize_t pos)
{
	Py_ssize_t length = index_length(self);
	const char *data;

	if (pos == nullrev)
		return nullid;

	if (pos >= length)
		return NULL;

	if (pos >= self->length) {
		/* in-memory revision: node is tuple item 7 (a bytes
		 * object owned by the tuple) */
		PyObject *tuple, *str;
		tuple = PyList_GET_ITEM(self->added, pos - self->length);
		str = PyTuple_GetItem(tuple, 7);
		return str ? PyBytes_AS_STRING(str) : NULL;
	}

	/* on-disk record: node id starts at byte offset 32 */
	data = index_deref(self, pos);
	return data ? data + 32 : NULL;
}
404 404
405 405 /*
406 406 * Return the 20-byte SHA of the node corresponding to the given rev. The
407 407 * rev is assumed to be existing. If not, an exception is set.
408 408 */
409 409 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
410 410 {
411 411 const char *node = index_node(self, pos);
412 412 if (node == NULL) {
413 413 PyErr_Format(PyExc_IndexError, "could not access rev %d",
414 414 (int)pos);
415 415 }
416 416 return node;
417 417 }
418 418
419 419 static int nt_insert(nodetree *self, const char *node, int rev);
420 420
421 421 static int node_check(PyObject *obj, char **node)
422 422 {
423 423 Py_ssize_t nodelen;
424 424 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
425 425 return -1;
426 426 if (nodelen == 20)
427 427 return 0;
428 428 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
429 429 return -1;
430 430 }
431 431
/*
 * Append a new revision entry (an 8-tuple, node at index 7) to the
 * index.  The entry is kept in the in-memory `added` list; the node is
 * also inserted into the node trie when that is initialized.  Returns
 * None on success, NULL with an exception set on failure.
 */
static PyObject *index_append(indexObject *self, PyObject *obj)
{
	char *node;
	Py_ssize_t len;

	if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
		PyErr_SetString(PyExc_TypeError, "8-tuple required");
		return NULL;
	}

	if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
		return NULL;

	/* current length == rev number the new entry will get */
	len = index_length(self);

	if (self->added == NULL) {
		self->added = PyList_New(0);
		if (self->added == NULL)
			return NULL;
	}

	if (PyList_Append(self->added, obj) == -1)
		return NULL;

	if (self->ntinitialized)
		nt_insert(&self->nt, node, (int)len);

	/* the cached head list is stale now */
	Py_CLEAR(self->headrevs);
	Py_RETURN_NONE;
}
462 462
/*
 * Build a dict of diagnostic counters about the index (revision counts,
 * node-trie statistics).  Returns a new dict, or NULL with an exception
 * set on failure.
 */
static PyObject *index_stats(indexObject *self)
{
	PyObject *obj = PyDict_New();
	PyObject *s = NULL;
	PyObject *t = NULL;

	if (obj == NULL)
		return NULL;

/* istat(field, label): store self->field under key `label`, jumping to
 * bail on any allocation or insertion failure. */
#define istat(__n, __d)                                                        \
	do {                                                                   \
		s = PyBytes_FromString(__d);                                   \
		t = PyInt_FromSsize_t(self->__n);                              \
		if (!s || !t)                                                  \
			goto bail;                                             \
		if (PyDict_SetItem(obj, s, t) == -1)                           \
			goto bail;                                             \
		Py_CLEAR(s);                                                   \
		Py_CLEAR(t);                                                   \
	} while (0)

	if (self->added) {
		Py_ssize_t len = PyList_GET_SIZE(self->added);
		s = PyBytes_FromString("index entries added");
		t = PyInt_FromSsize_t(len);
		if (!s || !t)
			goto bail;
		if (PyDict_SetItem(obj, s, t) == -1)
			goto bail;
		Py_CLEAR(s);
		Py_CLEAR(t);
	}

	if (self->raw_length != self->length)
		istat(raw_length, "revs on disk");
	istat(length, "revs in memory");
	istat(ntlookups, "node trie lookups");
	istat(ntmisses, "node trie misses");
	istat(ntrev, "node trie last rev scanned");
	if (self->ntinitialized) {
		istat(nt.capacity, "node trie capacity");
		istat(nt.depth, "node trie depth");
		istat(nt.length, "node trie count");
		istat(nt.splits, "node trie splits");
	}

#undef istat

	return obj;

bail:
	Py_XDECREF(obj);
	Py_XDECREF(s);
	Py_XDECREF(t);
	return NULL;
}
519 519
520 520 /*
521 521 * When we cache a list, we want to be sure the caller can't mutate
522 522 * the cached copy.
523 523 */
524 524 static PyObject *list_copy(PyObject *list)
525 525 {
526 526 Py_ssize_t len = PyList_GET_SIZE(list);
527 527 PyObject *newlist = PyList_New(len);
528 528 Py_ssize_t i;
529 529
530 530 if (newlist == NULL)
531 531 return NULL;
532 532
533 533 for (i = 0; i < len; i++) {
534 534 PyObject *obj = PyList_GET_ITEM(list, i);
535 535 Py_INCREF(obj);
536 536 PyList_SET_ITEM(newlist, i, obj);
537 537 }
538 538
539 539 return newlist;
540 540 }
541 541
542 542 static int check_filter(PyObject *filter, Py_ssize_t arg)
543 543 {
544 544 if (filter) {
545 545 PyObject *arglist, *result;
546 546 int isfiltered;
547 547
548 548 arglist = Py_BuildValue("(n)", arg);
549 549 if (!arglist) {
550 550 return -1;
551 551 }
552 552
553 553 result = PyEval_CallObject(filter, arglist);
554 554 Py_DECREF(arglist);
555 555 if (!result) {
556 556 return -1;
557 557 }
558 558
559 559 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
560 560 * same as this function, so we can just return it directly.*/
561 561 isfiltered = PyObject_IsTrue(result);
562 562 Py_DECREF(result);
563 563 return isfiltered;
564 564 } else {
565 565 return 0;
566 566 }
567 567 }
568 568
/*
 * Mark every revision in `list` with `marker` in the phases array and
 * return the smallest revision seen (or len+2 if the list is empty).
 * Returns -2 with an exception set on error.
 *
 * NOTE(review): entries of `list` are used to index `phases` without a
 * bounds check — assumes callers only pass in-range root revisions;
 * verify against callers.
 */
static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
				    Py_ssize_t marker, char *phases)
{
	PyObject *iter = NULL;
	PyObject *iter_item = NULL;
	/* sentinel larger than any valid rev, returned for empty input */
	Py_ssize_t min_idx = index_length(self) + 2;
	long iter_item_long;

	if (PyList_GET_SIZE(list) != 0) {
		iter = PyObject_GetIter(list);
		if (iter == NULL)
			return -2;
		while ((iter_item = PyIter_Next(iter))) {
			if (!pylong_to_long(iter_item, &iter_item_long)) {
				Py_DECREF(iter_item);
				return -2;
			}
			Py_DECREF(iter_item);
			if (iter_item_long < min_idx)
				min_idx = iter_item_long;
			phases[iter_item_long] = (char)marker;
		}
		Py_DECREF(iter);
	}

	return min_idx;
}
596 596
597 597 static inline void set_phase_from_parents(char *phases, int parent_1,
598 598 int parent_2, Py_ssize_t i)
599 599 {
600 600 if (parent_1 >= 0 && phases[parent_1] > phases[i])
601 601 phases[i] = phases[parent_1];
602 602 if (parent_2 >= 0 && phases[parent_2] > phases[i])
603 603 phases[i] = phases[parent_2];
604 604 }
605 605
/*
 * reachableroots2(minroot, heads, roots, includepath)
 *
 * Walk ancestors of `heads` (bounded below by `minroot`) and return the
 * members of `roots` that are reachable.  When `includepath` is True,
 * also include every revision lying on a path between a reachable root
 * and a head.  Returns a list of revisions, or NULL with an exception
 * set on failure.
 */
static PyObject *reachableroots2(indexObject *self, PyObject *args)
{

	/* Input */
	long minroot;
	PyObject *includepatharg = NULL;
	int includepath = 0;
	/* heads and roots are lists */
	PyObject *heads = NULL;
	PyObject *roots = NULL;
	PyObject *reachable = NULL;

	PyObject *val;
	Py_ssize_t len = index_length(self);
	long revnum;
	Py_ssize_t k;
	Py_ssize_t i;
	Py_ssize_t l;
	int r;
	int parents[2];

	/* Internal data structure:
	 * tovisit: array of length len+1 (all revs + nullrev), filled upto
	 * lentovisit
	 *
	 * revstates: array of length len+1 (all revs + nullrev)
	 * Indexed by rev+1 so that nullrev (-1) maps to slot 0. */
	int *tovisit = NULL;
	long lentovisit = 0;
	enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
	char *revstates = NULL;

	/* Get arguments */
	if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
			      &PyList_Type, &roots, &PyBool_Type,
			      &includepatharg))
		goto bail;

	if (includepatharg == Py_True)
		includepath = 1;

	/* Initialize return set */
	reachable = PyList_New(0);
	if (reachable == NULL)
		goto bail;

	/* Initialize internal datastructures */
	tovisit = (int *)malloc((len + 1) * sizeof(int));
	if (tovisit == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	revstates = (char *)calloc(len + 1, 1);
	if (revstates == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* Mark the requested roots */
	l = PyList_GET_SIZE(roots);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		/* If root is out of range, e.g. wdir(), it must be unreachable
		 * from heads. So we can just ignore it. */
		if (revnum + 1 < 0 || revnum + 1 >= len + 1)
			continue;
		revstates[revnum + 1] |= RS_ROOT;
	}

	/* Populate tovisit with all the heads */
	l = PyList_GET_SIZE(heads);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
			PyErr_SetString(PyExc_IndexError, "head out of range");
			goto bail;
		}
		if (!(revstates[revnum + 1] & RS_SEEN)) {
			tovisit[lentovisit++] = (int)revnum;
			revstates[revnum + 1] |= RS_SEEN;
		}
	}

	/* Visit the tovisit list and find the reachable roots */
	k = 0;
	while (k < lentovisit) {
		/* Add the node to reachable if it is a root*/
		revnum = tovisit[k++];
		if (revstates[revnum + 1] & RS_ROOT) {
			revstates[revnum + 1] |= RS_REACHABLE;
			val = PyInt_FromLong(revnum);
			if (val == NULL)
				goto bail;
			r = PyList_Append(reachable, val);
			Py_DECREF(val);
			if (r < 0)
				goto bail;
			/* without includepath we can stop at the first
			 * root found on each ancestry path */
			if (includepath == 0)
				continue;
		}

		/* Add its parents to the list of nodes to visit */
		if (revnum == nullrev)
			continue;
		r = index_get_parents(self, revnum, parents, (int)len - 1);
		if (r < 0)
			goto bail;
		for (i = 0; i < 2; i++) {
			if (!(revstates[parents[i] + 1] & RS_SEEN) &&
			    parents[i] >= minroot) {
				tovisit[lentovisit++] = parents[i];
				revstates[parents[i] + 1] |= RS_SEEN;
			}
		}
	}

	/* Find all the nodes in between the roots we found and the heads
	 * and add them to the reachable set */
	if (includepath == 1) {
		long minidx = minroot;
		if (minidx < 0)
			minidx = 0;
		for (i = minidx; i < len; i++) {
			if (!(revstates[i + 1] & RS_SEEN))
				continue;
			r = index_get_parents(self, i, parents, (int)len - 1);
			/* Corrupted index file, error is set from
			 * index_get_parents */
			if (r < 0)
				goto bail;
			/* reachable if some parent is reachable and this
			 * rev was not already added above */
			if (((revstates[parents[0] + 1] |
			      revstates[parents[1] + 1]) &
			     RS_REACHABLE) &&
			    !(revstates[i + 1] & RS_REACHABLE)) {
				revstates[i + 1] |= RS_REACHABLE;
				val = PyInt_FromSsize_t(i);
				if (val == NULL)
					goto bail;
				r = PyList_Append(reachable, val);
				Py_DECREF(val);
				if (r < 0)
					goto bail;
			}
		}
	}

	free(revstates);
	free(tovisit);
	return reachable;
bail:
	Py_XDECREF(reachable);
	free(revstates);
	free(tovisit);
	return NULL;
}
764 764
/*
 * compute_phases_map_sets(roots)
 *
 * `roots` is a list (indexed by phase-1) of lists of root revisions.
 * Propagates phases from the roots to all descendants and returns a
 * 2-tuple (len, phasesetlist) where phasesetlist[phase] is the set of
 * revisions in that non-public phase (slot 0 is None: public revisions
 * are the complement).  Returns NULL with an exception set on error.
 */
static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
{
	PyObject *roots = Py_None;
	PyObject *ret = NULL;
	PyObject *phasessize = NULL;
	PyObject *phaseroots = NULL;
	PyObject *phaseset = NULL;
	PyObject *phasessetlist = NULL;
	PyObject *rev = NULL;
	Py_ssize_t len = index_length(self);
	Py_ssize_t numphase = 0;
	Py_ssize_t minrevallphases = 0;
	Py_ssize_t minrevphase = 0;
	Py_ssize_t i = 0;
	char *phases = NULL;
	long phase;

	if (!PyArg_ParseTuple(args, "O", &roots))
		goto done;
	if (roots == NULL || !PyList_Check(roots)) {
		PyErr_SetString(PyExc_TypeError, "roots must be a list");
		goto done;
	}

	phases = calloc(
	    len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
	if (phases == NULL) {
		PyErr_NoMemory();
		goto done;
	}
	/* Put the phase information of all the roots in phases */
	numphase = PyList_GET_SIZE(roots) + 1;
	minrevallphases = len + 1;
	phasessetlist = PyList_New(numphase);
	if (phasessetlist == NULL)
		goto done;

	/* slot 0 (public phase) has no explicit set */
	PyList_SET_ITEM(phasessetlist, 0, Py_None);
	Py_INCREF(Py_None);

	for (i = 0; i < numphase - 1; i++) {
		phaseroots = PyList_GET_ITEM(roots, i);
		phaseset = PySet_New(NULL);
		if (phaseset == NULL)
			goto release;
		/* PyList_SET_ITEM steals the phaseset reference */
		PyList_SET_ITEM(phasessetlist, i + 1, phaseset);
		if (!PyList_Check(phaseroots)) {
			PyErr_SetString(PyExc_TypeError,
					"roots item must be a list");
			goto release;
		}
		minrevphase =
		    add_roots_get_min(self, phaseroots, i + 1, phases);
		if (minrevphase == -2) /* Error from add_roots_get_min */
			goto release;
		minrevallphases = MIN(minrevallphases, minrevphase);
	}
	/* Propagate the phase information from the roots to the revs */
	if (minrevallphases != -1) {
		int parents[2];
		for (i = minrevallphases; i < len; i++) {
			if (index_get_parents(self, i, parents, (int)len - 1) <
			    0)
				goto release;
			set_phase_from_parents(phases, parents[0], parents[1],
					       i);
		}
	}
	/* Transform phase list to a python list */
	phasessize = PyInt_FromSsize_t(len);
	if (phasessize == NULL)
		goto release;
	for (i = 0; i < len; i++) {
		phase = phases[i];
		/* We only store the sets of phase for non public phase, the
		 * public phase is computed as a difference */
		if (phase != 0) {
			phaseset = PyList_GET_ITEM(phasessetlist, phase);
			rev = PyInt_FromSsize_t(i);
			if (rev == NULL)
				goto release;
			PySet_Add(phaseset, rev);
			Py_XDECREF(rev);
		}
	}
	/* ret stays NULL if PyTuple_Pack fails, which is what we return */
	ret = PyTuple_Pack(2, phasessize, phasessetlist);

release:
	Py_XDECREF(phasessize);
	Py_XDECREF(phasessetlist);
done:
	free(phases);
	return ret;
}
859 859
/*
 * headrevs([filteredrevs])
 *
 * Return the list of head revisions (revisions with no unfiltered
 * child).  The result is cached on the index and invalidated when the
 * index or the filter changes; callers always receive a private copy.
 * Returns NULL with an exception set on failure.
 */
static PyObject *index_headrevs(indexObject *self, PyObject *args)
{
	Py_ssize_t i, j, len;
	char *nothead = NULL;
	PyObject *heads = NULL;
	PyObject *filter = NULL;
	PyObject *filteredrevs = Py_None;

	if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
		return NULL;
	}

	/* cache hit: same filter object as last time */
	if (self->headrevs && filteredrevs == self->filteredrevs)
		return list_copy(self->headrevs);

	Py_DECREF(self->filteredrevs);
	self->filteredrevs = filteredrevs;
	Py_INCREF(filteredrevs);

	if (filteredrevs != Py_None) {
		filter = PyObject_GetAttrString(filteredrevs, "__contains__");
		if (!filter) {
			PyErr_SetString(
			    PyExc_TypeError,
			    "filteredrevs has no attribute __contains__");
			goto bail;
		}
	}

	len = index_length(self);
	heads = PyList_New(0);
	if (heads == NULL)
		goto bail;
	if (len == 0) {
		/* empty repo: nullrev is the only head.  (This local
		 * `nullid` intentionally shadows the file-level node
		 * constant.) */
		PyObject *nullid = PyInt_FromLong(-1);
		if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
			Py_XDECREF(nullid);
			goto bail;
		}
		goto done;
	}

	nothead = calloc(len, 1);
	if (nothead == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* walk from tip down so children are classified before their
	 * parents are marked */
	for (i = len - 1; i >= 0; i--) {
		int isfiltered;
		int parents[2];

		/* If nothead[i] == 1, it means we've seen an unfiltered child
		 * of this node already, and therefore this node is not
		 * filtered. So we can skip the expensive check_filter step.
		 */
		if (nothead[i] != 1) {
			isfiltered = check_filter(filter, i);
			if (isfiltered == -1) {
				PyErr_SetString(PyExc_TypeError,
						"unable to check filter");
				goto bail;
			}

			if (isfiltered) {
				nothead[i] = 1;
				continue;
			}
		}

		if (index_get_parents(self, i, parents, (int)len - 1) < 0)
			goto bail;
		for (j = 0; j < 2; j++) {
			if (parents[j] >= 0)
				nothead[parents[j]] = 1;
		}
	}

	/* everything not marked "not a head" is a head */
	for (i = 0; i < len; i++) {
		PyObject *head;

		if (nothead[i])
			continue;
		head = PyInt_FromSsize_t(i);
		if (head == NULL || PyList_Append(heads, head) == -1) {
			Py_XDECREF(head);
			goto bail;
		}
	}

done:
	self->headrevs = heads;
	Py_XDECREF(filter);
	free(nothead);
	return list_copy(self->headrevs);
bail:
	Py_XDECREF(filter);
	Py_XDECREF(heads);
	free(nothead);
	return NULL;
}
961 961
/**
 * Obtain the base revision index entry.
 *
 * Callers must ensure that rev >= 0 or illegal memory access may occur.
 *
 * Returns the delta-base revision of `rev`, or -2 with an exception set
 * on error (including a corrupted base that points above `rev`).
 */
static inline int index_baserev(indexObject *self, int rev)
{
	const char *data;
	int result;

	if (rev >= self->length) {
		/* in-memory revision: base is tuple item 3 */
		PyObject *tuple =
		    PyList_GET_ITEM(self->added, rev - self->length);
		long ret;
		if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 3), &ret)) {
			return -2;
		}
		result = (int)ret;
	} else {
		data = index_deref(self, rev);
		if (data == NULL) {
			return -2;
		}

		/* on-disk record: big-endian base revision at offset 16 */
		result = getbe32(data + 16);
	}
	if (result > rev) {
		/* a base above its revision cannot be valid */
		PyErr_Format(
		    PyExc_ValueError,
		    "corrupted revlog, revision base above revision: %d, %d",
		    rev, result);
		return -2;
	}
	return result;
}
997 997
/**
 * Find if a revision is a snapshot or not
 *
 * Only relevant for sparse-revlog case.
 * Callers must ensure that rev is in a valid range.
 *
 * Walks the delta-base chain: a revision is a snapshot (returns 1) when
 * the walk reaches a full snapshot (base == -1, or base == rev meaning
 * "self-based") without any link ever being a delta against a parent
 * (base == one of the parents => returns 0).  Returns -1 with an
 * exception set on error.
 */
static int index_issnapshotrev(indexObject *self, Py_ssize_t rev)
{
	int ps[2];
	Py_ssize_t base;
	while (rev >= 0) {
		base = (Py_ssize_t)index_baserev(self, rev);
		if (base == rev) {
			/* base == self means "no base": full snapshot */
			base = -1;
		}
		if (base == -2) {
			/* error signalled by index_baserev */
			assert(PyErr_Occurred());
			return -1;
		}
		if (base == -1) {
			return 1;
		}
		if (index_get_parents(self, rev, ps, (int)rev) < 0) {
			assert(PyErr_Occurred());
			return -1;
		};
		if (base == ps[0] || base == ps[1]) {
			/* delta against a parent: plain delta, not a
			 * snapshot */
			return 0;
		}
		rev = base;
	}
	/* only reachable with rev < 0 on entry; nullrev counts as a
	 * snapshot */
	return rev == -1;
}
1031
998 1032 static PyObject *index_deltachain(indexObject *self, PyObject *args)
999 1033 {
1000 1034 int rev, generaldelta;
1001 1035 PyObject *stoparg;
1002 1036 int stoprev, iterrev, baserev = -1;
1003 1037 int stopped;
1004 1038 PyObject *chain = NULL, *result = NULL;
1005 1039 const Py_ssize_t length = index_length(self);
1006 1040
1007 1041 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
1008 1042 return NULL;
1009 1043 }
1010 1044
1011 1045 if (PyInt_Check(stoparg)) {
1012 1046 stoprev = (int)PyInt_AsLong(stoparg);
1013 1047 if (stoprev == -1 && PyErr_Occurred()) {
1014 1048 return NULL;
1015 1049 }
1016 1050 } else if (stoparg == Py_None) {
1017 1051 stoprev = -2;
1018 1052 } else {
1019 1053 PyErr_SetString(PyExc_ValueError,
1020 1054 "stoprev must be integer or None");
1021 1055 return NULL;
1022 1056 }
1023 1057
1024 1058 if (rev < 0 || rev >= length) {
1025 1059 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1026 1060 return NULL;
1027 1061 }
1028 1062
1029 1063 chain = PyList_New(0);
1030 1064 if (chain == NULL) {
1031 1065 return NULL;
1032 1066 }
1033 1067
1034 1068 baserev = index_baserev(self, rev);
1035 1069
1036 1070 /* This should never happen. */
1037 1071 if (baserev <= -2) {
1038 1072 /* Error should be set by index_deref() */
1039 1073 assert(PyErr_Occurred());
1040 1074 goto bail;
1041 1075 }
1042 1076
1043 1077 iterrev = rev;
1044 1078
1045 1079 while (iterrev != baserev && iterrev != stoprev) {
1046 1080 PyObject *value = PyInt_FromLong(iterrev);
1047 1081 if (value == NULL) {
1048 1082 goto bail;
1049 1083 }
1050 1084 if (PyList_Append(chain, value)) {
1051 1085 Py_DECREF(value);
1052 1086 goto bail;
1053 1087 }
1054 1088 Py_DECREF(value);
1055 1089
1056 1090 if (generaldelta) {
1057 1091 iterrev = baserev;
1058 1092 } else {
1059 1093 iterrev--;
1060 1094 }
1061 1095
1062 1096 if (iterrev < 0) {
1063 1097 break;
1064 1098 }
1065 1099
1066 1100 if (iterrev >= length) {
1067 1101 PyErr_SetString(PyExc_IndexError,
1068 1102 "revision outside index");
1069 1103 return NULL;
1070 1104 }
1071 1105
1072 1106 baserev = index_baserev(self, iterrev);
1073 1107
1074 1108 /* This should never happen. */
1075 1109 if (baserev <= -2) {
1076 1110 /* Error should be set by index_deref() */
1077 1111 assert(PyErr_Occurred());
1078 1112 goto bail;
1079 1113 }
1080 1114 }
1081 1115
1082 1116 if (iterrev == stoprev) {
1083 1117 stopped = 1;
1084 1118 } else {
1085 1119 PyObject *value = PyInt_FromLong(iterrev);
1086 1120 if (value == NULL) {
1087 1121 goto bail;
1088 1122 }
1089 1123 if (PyList_Append(chain, value)) {
1090 1124 Py_DECREF(value);
1091 1125 goto bail;
1092 1126 }
1093 1127 Py_DECREF(value);
1094 1128
1095 1129 stopped = 0;
1096 1130 }
1097 1131
1098 1132 if (PyList_Reverse(chain)) {
1099 1133 goto bail;
1100 1134 }
1101 1135
1102 1136 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
1103 1137 Py_DECREF(chain);
1104 1138 return result;
1105 1139
1106 1140 bail:
1107 1141 Py_DECREF(chain);
1108 1142 return NULL;
1109 1143 }
1110 1144
1111 1145 static inline int64_t
1112 1146 index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
1113 1147 {
1114 1148 int64_t start_offset;
1115 1149 int64_t end_offset;
1116 1150 int end_size;
1117 1151 start_offset = index_get_start(self, start_rev);
1118 1152 if (start_offset < 0) {
1119 1153 return -1;
1120 1154 }
1121 1155 end_offset = index_get_start(self, end_rev);
1122 1156 if (end_offset < 0) {
1123 1157 return -1;
1124 1158 }
1125 1159 end_size = index_get_length(self, end_rev);
1126 1160 if (end_size < 0) {
1127 1161 return -1;
1128 1162 }
1129 1163 if (end_offset < start_offset) {
1130 1164 PyErr_Format(PyExc_ValueError,
1131 1165 "corrupted revlog index: inconsistent offset "
1132 1166 "between revisions (%zd) and (%zd)",
1133 1167 start_rev, end_rev);
1134 1168 return -1;
1135 1169 }
1136 1170 return (end_offset - start_offset) + (int64_t)end_size;
1137 1171 }
1138 1172
1139 1173 /* returns endidx so that revs[startidx:endidx] has no empty trailing revs */
1140 1174 static Py_ssize_t trim_endidx(indexObject *self, const Py_ssize_t *revs,
1141 1175 Py_ssize_t startidx, Py_ssize_t endidx)
1142 1176 {
1143 1177 int length;
1144 1178 while (endidx > 1 && endidx > startidx) {
1145 1179 length = index_get_length(self, revs[endidx - 1]);
1146 1180 if (length < 0) {
1147 1181 return -1;
1148 1182 }
1149 1183 if (length != 0) {
1150 1184 break;
1151 1185 }
1152 1186 endidx -= 1;
1153 1187 }
1154 1188 return endidx;
1155 1189 }
1156 1190
1157 1191 struct Gap {
1158 1192 int64_t size;
1159 1193 Py_ssize_t idx;
1160 1194 };
1161 1195
1162 1196 static int gap_compare(const void *left, const void *right)
1163 1197 {
1164 1198 const struct Gap *l_left = ((const struct Gap *)left);
1165 1199 const struct Gap *l_right = ((const struct Gap *)right);
1166 1200 if (l_left->size < l_right->size) {
1167 1201 return -1;
1168 1202 } else if (l_left->size > l_right->size) {
1169 1203 return 1;
1170 1204 }
1171 1205 return 0;
1172 1206 }
1173 1207 static int Py_ssize_t_compare(const void *left, const void *right)
1174 1208 {
1175 1209 const Py_ssize_t l_left = *(const Py_ssize_t *)left;
1176 1210 const Py_ssize_t l_right = *(const Py_ssize_t *)right;
1177 1211 if (l_left < l_right) {
1178 1212 return -1;
1179 1213 } else if (l_left > l_right) {
1180 1214 return 1;
1181 1215 }
1182 1216 return 0;
1183 1217 }
1184 1218
/*
 * slicechunktodensity(revs, targetdensity, mingapsize) -> slices
 *
 * Slice the delta chain `revs` (a Python list of revisions) into
 * contiguous chunks whose read density (payload bytes / bytes read
 * from disk) is at least `targetdensity`, skipping over gaps larger
 * than `mingapsize` bytes. Returns a 1-tuple (revs,) when no slicing
 * is needed, otherwise a list of list slices covering the input in
 * order.
 */
static PyObject *index_slicechunktodensity(indexObject *self, PyObject *args)
{
	/* method arguments */
	PyObject *list_revs = NULL; /* revisions in the chain */
	double targetdensity = 0;   /* min density to achieve */
	Py_ssize_t mingapsize = 0;  /* threshold to ignore gaps */

	/* other core variables */
	Py_ssize_t idxlen = index_length(self);
	Py_ssize_t i;            /* used for various iteration */
	PyObject *result = NULL; /* the final return of the function */

	/* generic information about the delta chain being slice */
	Py_ssize_t num_revs = 0;    /* size of the full delta chain */
	Py_ssize_t *revs = NULL;    /* native array of revision in the chain */
	int64_t chainpayload = 0;   /* sum of all delta in the chain */
	int64_t deltachainspan = 0; /* distance from first byte to last byte */

	/* variable used for slicing the delta chain */
	int64_t readdata = 0; /* amount of data currently planned to be read */
	double density = 0;   /* ratio of payload data compared to read ones */
	int64_t previous_end;
	struct Gap *gaps = NULL; /* array of notable gap in the chain */
	Py_ssize_t num_gaps =
	    0; /* total number of notable gap recorded so far */
	Py_ssize_t *selected_indices = NULL; /* indices of gap skipped over */
	Py_ssize_t num_selected = 0;         /* number of gaps skipped */
	PyObject *chunk = NULL;              /* individual slice */
	PyObject *allchunks = NULL;          /* all slices */
	Py_ssize_t previdx;

	/* parsing argument */
	if (!PyArg_ParseTuple(args, "O!dn", &PyList_Type, &list_revs,
	                      &targetdensity, &mingapsize)) {
		goto bail;
	}

	/* If the delta chain contains a single element, we do not need slicing
	 */
	num_revs = PyList_GET_SIZE(list_revs);
	if (num_revs <= 1) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* Turn the python list into a native integer array (for efficiency) */
	revs = (Py_ssize_t *)calloc(num_revs, sizeof(Py_ssize_t));
	if (revs == NULL) {
		PyErr_NoMemory();
		goto bail;
	}
	for (i = 0; i < num_revs; i++) {
		Py_ssize_t revnum = PyInt_AsLong(PyList_GET_ITEM(list_revs, i));
		if (revnum == -1 && PyErr_Occurred()) {
			goto bail;
		}
		if (revnum < nullrev || revnum >= idxlen) {
			PyErr_Format(PyExc_IndexError,
			             "index out of range: %zd", revnum);
			goto bail;
		}
		revs[i] = revnum;
	}

	/* Compute and check various property of the unsliced delta chain */
	deltachainspan = index_segment_span(self, revs[0], revs[num_revs - 1]);
	if (deltachainspan < 0) {
		goto bail;
	}

	/* a chain no larger than the gap threshold never needs slicing */
	if (deltachainspan <= mingapsize) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	chainpayload = 0;
	for (i = 0; i < num_revs; i++) {
		int tmp = index_get_length(self, revs[i]);
		if (tmp < 0) {
			goto bail;
		}
		chainpayload += tmp;
	}

	readdata = deltachainspan;
	density = 1.0;

	if (0 < deltachainspan) {
		density = (double)chainpayload / (double)deltachainspan;
	}

	/* dense enough already: return the chain unsliced */
	if (density >= targetdensity) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* if chain is too sparse, look for relevant gaps */
	gaps = (struct Gap *)calloc(num_revs, sizeof(struct Gap));
	if (gaps == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	previous_end = -1;
	for (i = 0; i < num_revs; i++) {
		int64_t revstart;
		int revsize;
		revstart = index_get_start(self, revs[i]);
		if (revstart < 0) {
			goto bail;
		};
		revsize = index_get_length(self, revs[i]);
		if (revsize < 0) {
			goto bail;
		};
		if (revsize == 0) {
			/* empty revisions contribute neither data nor gaps */
			continue;
		}
		if (previous_end >= 0) {
			int64_t gapsize = revstart - previous_end;
			if (gapsize > mingapsize) {
				gaps[num_gaps].size = gapsize;
				gaps[num_gaps].idx = i;
				num_gaps += 1;
			}
		}
		previous_end = revstart + revsize;
	}
	if (num_gaps == 0) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	qsort(gaps, num_gaps, sizeof(struct Gap), &gap_compare);

	/* Slice the largest gap first, they improve the density the most */
	selected_indices =
	    (Py_ssize_t *)malloc((num_gaps + 1) * sizeof(Py_ssize_t));
	if (selected_indices == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	for (i = num_gaps - 1; i >= 0; i--) {
		selected_indices[num_selected] = gaps[i].idx;
		readdata -= gaps[i].size;
		num_selected += 1;
		if (readdata <= 0) {
			density = 1.0;
		} else {
			density = (double)chainpayload / (double)readdata;
		}
		if (density >= targetdensity) {
			break;
		}
	}
	/* cut points must be applied in revision order */
	qsort(selected_indices, num_selected, sizeof(Py_ssize_t),
	      &Py_ssize_t_compare);

	/* create the resulting slice */
	allchunks = PyList_New(0);
	if (allchunks == NULL) {
		goto bail;
	}
	previdx = 0;
	/* sentinel so the last chunk reaches the end of the chain */
	selected_indices[num_selected] = num_revs;
	for (i = 0; i <= num_selected; i++) {
		Py_ssize_t idx = selected_indices[i];
		Py_ssize_t endidx = trim_endidx(self, revs, previdx, idx);
		if (endidx < 0) {
			goto bail;
		}
		if (previdx < endidx) {
			chunk = PyList_GetSlice(list_revs, previdx, endidx);
			if (chunk == NULL) {
				goto bail;
			}
			if (PyList_Append(allchunks, chunk) == -1) {
				goto bail;
			}
			Py_DECREF(chunk);
			chunk = NULL;
		}
		previdx = idx;
	}
	result = allchunks;
	goto done;

bail:
	Py_XDECREF(allchunks);
	Py_XDECREF(chunk);
done:
	free(revs);
	free(gaps);
	free(selected_indices);
	return result;
}
1380 1414
1381 1415 static inline int nt_level(const char *node, Py_ssize_t level)
1382 1416 {
1383 1417 int v = node[level >> 1];
1384 1418 if (!(level & 1))
1385 1419 v >>= 4;
1386 1420 return v & 0xf;
1387 1421 }
1388 1422
1389 1423 /*
1390 1424 * Return values:
1391 1425 *
1392 1426 * -4: match is ambiguous (multiple candidates)
1393 1427 * -2: not found
1394 1428 * rest: valid rev
1395 1429 */
1396 1430 static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
1397 1431 int hex)
1398 1432 {
1399 1433 int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
1400 1434 int level, maxlevel, off;
1401 1435
1402 1436 if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
1403 1437 return -1;
1404 1438
1405 1439 if (hex)
1406 1440 maxlevel = nodelen > 40 ? 40 : (int)nodelen;
1407 1441 else
1408 1442 maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);
1409 1443
1410 1444 for (level = off = 0; level < maxlevel; level++) {
1411 1445 int k = getnybble(node, level);
1412 1446 nodetreenode *n = &self->nodes[off];
1413 1447 int v = n->children[k];
1414 1448
1415 1449 if (v < 0) {
1416 1450 const char *n;
1417 1451 Py_ssize_t i;
1418 1452
1419 1453 v = -(v + 2);
1420 1454 n = index_node(self->index, v);
1421 1455 if (n == NULL)
1422 1456 return -2;
1423 1457 for (i = level; i < maxlevel; i++)
1424 1458 if (getnybble(node, i) != nt_level(n, i))
1425 1459 return -2;
1426 1460 return v;
1427 1461 }
1428 1462 if (v == 0)
1429 1463 return -2;
1430 1464 off = v;
1431 1465 }
1432 1466 /* multiple matches against an ambiguous prefix */
1433 1467 return -4;
1434 1468 }
1435 1469
1436 1470 static int nt_new(nodetree *self)
1437 1471 {
1438 1472 if (self->length == self->capacity) {
1439 1473 unsigned newcapacity;
1440 1474 nodetreenode *newnodes;
1441 1475 newcapacity = self->capacity * 2;
1442 1476 if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
1443 1477 PyErr_SetString(PyExc_MemoryError,
1444 1478 "overflow in nt_new");
1445 1479 return -1;
1446 1480 }
1447 1481 newnodes =
1448 1482 realloc(self->nodes, newcapacity * sizeof(nodetreenode));
1449 1483 if (newnodes == NULL) {
1450 1484 PyErr_SetString(PyExc_MemoryError, "out of memory");
1451 1485 return -1;
1452 1486 }
1453 1487 self->capacity = newcapacity;
1454 1488 self->nodes = newnodes;
1455 1489 memset(&self->nodes[self->length], 0,
1456 1490 sizeof(nodetreenode) * (self->capacity - self->length));
1457 1491 }
1458 1492 return self->length++;
1459 1493 }
1460 1494
/*
 * Insert a 20-byte binary node into the trie, associating it with rev.
 * Returns 0 on success, -1 on error (exception set by
 * index_node_existing or nt_new).
 */
static int nt_insert(nodetree *self, const char *node, int rev)
{
	int level = 0;
	int off = 0;

	while (level < 40) {
		int k = nt_level(node, level);
		nodetreenode *n;
		int v;

		n = &self->nodes[off];
		v = n->children[k];

		if (v == 0) {
			/* empty slot: store the leaf here (rev is encoded as
			 * -rev - 2 so that 0 keeps meaning "empty") */
			n->children[k] = -rev - 2;
			return 0;
		}
		if (v < 0) {
			/* slot already holds a leaf: either the same node
			 * (overwrite in place) or we must split this level */
			const char *oldnode =
			    index_node_existing(self->index, -(v + 2));
			int noff;

			if (oldnode == NULL)
				return -1;
			if (!memcmp(oldnode, node, 20)) {
				n->children[k] = -rev - 2;
				return 0;
			}
			noff = nt_new(self);
			if (noff == -1)
				return -1;
			/* self->nodes may have been changed by realloc */
			self->nodes[off].children[k] = noff;
			off = noff;
			n = &self->nodes[off];
			/* push the displaced leaf one level down */
			n->children[nt_level(oldnode, ++level)] = v;
			if (level > self->depth)
				self->depth = level;
			self->splits += 1;
		} else {
			/* interior node: descend */
			level += 1;
			off = v;
		}
	}

	/* only reachable if two distinct nodes share all 40 nybbles */
	return -1;
}
1508 1542
1509 1543 static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
1510 1544 {
1511 1545 Py_ssize_t rev;
1512 1546 const char *node;
1513 1547 Py_ssize_t length;
1514 1548 if (!PyArg_ParseTuple(args, "n", &rev))
1515 1549 return NULL;
1516 1550 length = index_length(self->nt.index);
1517 1551 if (rev < 0 || rev >= length) {
1518 1552 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1519 1553 return NULL;
1520 1554 }
1521 1555 node = index_node_existing(self->nt.index, rev);
1522 1556 if (nt_insert(&self->nt, node, (int)rev) == -1)
1523 1557 return NULL;
1524 1558 Py_RETURN_NONE;
1525 1559 }
1526 1560
/* Remove a node from the tree. Returns 0 on success, -1 on error. */
static int nt_delete_node(nodetree *self, const char *node)
{
	/* rev==-2 happens to get encoded as 0, which is interpreted as not set
	 */
	return nt_insert(self, node, -2);
}
1533 1567
1534 1568 static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
1535 1569 {
1536 1570 /* Initialize before overflow-checking to avoid nt_dealloc() crash. */
1537 1571 self->nodes = NULL;
1538 1572
1539 1573 self->index = index;
1540 1574 /* The input capacity is in terms of revisions, while the field is in
1541 1575 * terms of nodetree nodes. */
1542 1576 self->capacity = (capacity < 4 ? 4 : capacity / 2);
1543 1577 self->depth = 0;
1544 1578 self->splits = 0;
1545 1579 if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
1546 1580 PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
1547 1581 return -1;
1548 1582 }
1549 1583 self->nodes = calloc(self->capacity, sizeof(nodetreenode));
1550 1584 if (self->nodes == NULL) {
1551 1585 PyErr_NoMemory();
1552 1586 return -1;
1553 1587 }
1554 1588 self->length = 1;
1555 1589 return 0;
1556 1590 }
1557 1591
/* tp_init for parsers.nodetree: nodetree(index, capacity). Takes a
 * reference on index, released in ntobj_dealloc. */
static int ntobj_init(nodetreeObject *self, PyObject *args)
{
	PyObject *index;
	unsigned capacity;
	if (!PyArg_ParseTuple(args, "O!I", &HgRevlogIndex_Type, &index,
	                      &capacity))
		return -1;
	Py_INCREF(index);
	return nt_init(&self->nt, (indexObject *)index, capacity);
}
1568 1602
/* Resolve a hex prefix to a revision: thin wrapper around nt_find in
 * hex mode, with the same return convention. */
static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
{
	return nt_find(self, node, nodelen, 1);
}
1573 1607
1574 1608 /*
1575 1609 * Find the length of the shortest unique prefix of node.
1576 1610 *
1577 1611 * Return values:
1578 1612 *
1579 1613 * -3: error (exception set)
1580 1614 * -2: not found (no exception set)
1581 1615 * rest: length of shortest prefix
1582 1616 */
1583 1617 static int nt_shortest(nodetree *self, const char *node)
1584 1618 {
1585 1619 int level, off;
1586 1620
1587 1621 for (level = off = 0; level < 40; level++) {
1588 1622 int k, v;
1589 1623 nodetreenode *n = &self->nodes[off];
1590 1624 k = nt_level(node, level);
1591 1625 v = n->children[k];
1592 1626 if (v < 0) {
1593 1627 const char *n;
1594 1628 v = -(v + 2);
1595 1629 n = index_node_existing(self->index, v);
1596 1630 if (n == NULL)
1597 1631 return -3;
1598 1632 if (memcmp(node, n, 20) != 0)
1599 1633 /*
1600 1634 * Found a unique prefix, but it wasn't for the
1601 1635 * requested node (i.e the requested node does
1602 1636 * not exist).
1603 1637 */
1604 1638 return -2;
1605 1639 return level + 1;
1606 1640 }
1607 1641 if (v == 0)
1608 1642 return -2;
1609 1643 off = v;
1610 1644 }
1611 1645 /*
1612 1646 * The node was still not unique after 40 hex digits, so this won't
1613 1647 * happen. Also, if we get here, then there's a programming error in
1614 1648 * this file that made us insert a node longer than 40 hex digits.
1615 1649 */
1616 1650 PyErr_SetString(PyExc_Exception, "broken node tree");
1617 1651 return -3;
1618 1652 }
1619 1653
1620 1654 static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
1621 1655 {
1622 1656 PyObject *val;
1623 1657 char *node;
1624 1658 int length;
1625 1659
1626 1660 if (!PyArg_ParseTuple(args, "O", &val))
1627 1661 return NULL;
1628 1662 if (node_check(val, &node) == -1)
1629 1663 return NULL;
1630 1664
1631 1665 length = nt_shortest(&self->nt, node);
1632 1666 if (length == -3)
1633 1667 return NULL;
1634 1668 if (length == -2) {
1635 1669 raise_revlog_error();
1636 1670 return NULL;
1637 1671 }
1638 1672 return PyInt_FromLong(length);
1639 1673 }
1640 1674
/* Release the trie's backing storage; safe to call more than once. */
static void nt_dealloc(nodetree *self)
{
	free(self->nodes);
	self->nodes = NULL;
}
1646 1680
/* tp_dealloc for the Python nodetree wrapper: drop the index reference
 * taken in ntobj_init, free the trie, then the object itself. */
static void ntobj_dealloc(nodetreeObject *self)
{
	Py_XDECREF(self->nt.index);
	nt_dealloc(&self->nt);
	PyObject_Del(self);
}
1653 1687
/* methods exposed on parsers.nodetree instances */
static PyMethodDef ntobj_methods[] = {
    {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
     "insert an index entry"},
    {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
     "find length of shortest hex nodeid of a binary ID"},
    {NULL} /* Sentinel */
};
1661 1695
/* Python type object backing parsers.nodetree (see ntobj_* above) */
static PyTypeObject nodetreeType = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.nodetree",            /* tp_name */
    sizeof(nodetreeObject),        /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)ntobj_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    0,                             /* tp_as_sequence */
    0,                             /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "nodetree",                    /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    ntobj_methods,                 /* tp_methods */
    0,                             /* tp_members */
    0,                             /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)ntobj_init,          /* tp_init */
    0,                             /* tp_alloc */
};
1701 1735
1702 1736 static int index_init_nt(indexObject *self)
1703 1737 {
1704 1738 if (!self->ntinitialized) {
1705 1739 if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
1706 1740 nt_dealloc(&self->nt);
1707 1741 return -1;
1708 1742 }
1709 1743 if (nt_insert(&self->nt, nullid, -1) == -1) {
1710 1744 nt_dealloc(&self->nt);
1711 1745 return -1;
1712 1746 }
1713 1747 self->ntinitialized = 1;
1714 1748 self->ntrev = (int)index_length(self);
1715 1749 self->ntlookups = 1;
1716 1750 self->ntmisses = 0;
1717 1751 }
1718 1752 return 0;
1719 1753 }
1720 1754
/*
 * Look up a binary node (or prefix of one) in the index.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: valid rev
 */
static int index_find_node(indexObject *self, const char *node,
                           Py_ssize_t nodelen)
{
	int rev;

	if (index_init_nt(self) == -1)
		return -3;

	self->ntlookups++;
	rev = nt_find(&self->nt, node, nodelen, 0);
	if (rev >= -1)
		return rev;

	/*
	 * For the first handful of lookups, we scan the entire index,
	 * and cache only the matching nodes. This optimizes for cases
	 * like "hg tip", where only a few nodes are accessed.
	 *
	 * After that, we cache every node we visit, using a single
	 * scan amortized over multiple lookups. This gives the best
	 * bulk performance, e.g. for "hg log".
	 */
	if (self->ntmisses++ < 4) {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				if (nt_insert(&self->nt, n, rev) == -1)
					return -3;
				break;
			}
		}
	} else {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (nt_insert(&self->nt, n, rev) == -1) {
				/* remember how far we got before failing */
				self->ntrev = rev + 1;
				return -3;
			}
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				break;
			}
		}
		/* everything from rev upward is now cached in the tree */
		self->ntrev = rev;
	}

	if (rev >= 0)
		return rev;
	return -2;
}
1781 1815
1782 1816 static PyObject *index_getitem(indexObject *self, PyObject *value)
1783 1817 {
1784 1818 char *node;
1785 1819 int rev;
1786 1820
1787 1821 if (PyInt_Check(value)) {
1788 1822 long idx;
1789 1823 if (!pylong_to_long(value, &idx)) {
1790 1824 return NULL;
1791 1825 }
1792 1826 return index_get(self, idx);
1793 1827 }
1794 1828
1795 1829 if (node_check(value, &node) == -1)
1796 1830 return NULL;
1797 1831 rev = index_find_node(self, node, 20);
1798 1832 if (rev >= -1)
1799 1833 return PyInt_FromLong(rev);
1800 1834 if (rev == -2)
1801 1835 raise_revlog_error();
1802 1836 return NULL;
1803 1837 }
1804 1838
1805 1839 /*
1806 1840 * Fully populate the radix tree.
1807 1841 */
1808 1842 static int index_populate_nt(indexObject *self)
1809 1843 {
1810 1844 int rev;
1811 1845 if (self->ntrev > 0) {
1812 1846 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1813 1847 const char *n = index_node_existing(self, rev);
1814 1848 if (n == NULL)
1815 1849 return -1;
1816 1850 if (nt_insert(&self->nt, n, rev) == -1)
1817 1851 return -1;
1818 1852 }
1819 1853 self->ntrev = -1;
1820 1854 }
1821 1855 return 0;
1822 1856 }
1823 1857
/*
 * partialmatch(hexprefix) -> binary node, None, or error
 *
 * Resolve a hex node prefix to the full 20-byte binary node. Returns
 * None when the prefix contains non-hex characters or matches no
 * node, raises a revlog error on an ambiguous prefix, and returns the
 * nullid for a prefix of the null node.
 */
static PyObject *index_partialmatch(indexObject *self, PyObject *args)
{
	const char *fullnode;
	int nodelen;
	char *node;
	int rev, i;

	/* NOTE(review): "s#"/"y#" paired with an `int` length assumes
	 * this file is compiled without PY_SSIZE_T_CLEAN — confirm */
	if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
		return NULL;

	if (nodelen < 1) {
		PyErr_SetString(PyExc_ValueError, "key too short");
		return NULL;
	}

	/* a full hex node is 40 characters */
	if (nodelen > 40) {
		PyErr_SetString(PyExc_ValueError, "key too long");
		return NULL;
	}

	for (i = 0; i < nodelen; i++)
		hexdigit(node, i);
	if (PyErr_Occurred()) {
		/* input contains non-hex characters */
		PyErr_Clear();
		Py_RETURN_NONE;
	}

	/* prefix search requires the complete tree */
	if (index_init_nt(self) == -1)
		return NULL;
	if (index_populate_nt(self) == -1)
		return NULL;
	rev = nt_partialmatch(&self->nt, node, nodelen);

	switch (rev) {
	case -4:
		raise_revlog_error();
		return NULL;
	case -2:
		Py_RETURN_NONE;
	case -1:
		return PyBytes_FromStringAndSize(nullid, 20);
	}

	fullnode = index_node_existing(self, rev);
	if (fullnode == NULL) {
		return NULL;
	}
	return PyBytes_FromStringAndSize(fullnode, 20);
}
1874 1908
1875 1909 static PyObject *index_shortest(indexObject *self, PyObject *args)
1876 1910 {
1877 1911 PyObject *val;
1878 1912 char *node;
1879 1913 int length;
1880 1914
1881 1915 if (!PyArg_ParseTuple(args, "O", &val))
1882 1916 return NULL;
1883 1917 if (node_check(val, &node) == -1)
1884 1918 return NULL;
1885 1919
1886 1920 self->ntlookups++;
1887 1921 if (index_init_nt(self) == -1)
1888 1922 return NULL;
1889 1923 if (index_populate_nt(self) == -1)
1890 1924 return NULL;
1891 1925 length = nt_shortest(&self->nt, node);
1892 1926 if (length == -3)
1893 1927 return NULL;
1894 1928 if (length == -2) {
1895 1929 raise_revlog_error();
1896 1930 return NULL;
1897 1931 }
1898 1932 return PyInt_FromLong(length);
1899 1933 }
1900 1934
1901 1935 static PyObject *index_m_get(indexObject *self, PyObject *args)
1902 1936 {
1903 1937 PyObject *val;
1904 1938 char *node;
1905 1939 int rev;
1906 1940
1907 1941 if (!PyArg_ParseTuple(args, "O", &val))
1908 1942 return NULL;
1909 1943 if (node_check(val, &node) == -1)
1910 1944 return NULL;
1911 1945 rev = index_find_node(self, node, 20);
1912 1946 if (rev == -3)
1913 1947 return NULL;
1914 1948 if (rev == -2)
1915 1949 Py_RETURN_NONE;
1916 1950 return PyInt_FromLong(rev);
1917 1951 }
1918 1952
1919 1953 static int index_contains(indexObject *self, PyObject *value)
1920 1954 {
1921 1955 char *node;
1922 1956
1923 1957 if (PyInt_Check(value)) {
1924 1958 long rev;
1925 1959 if (!pylong_to_long(value, &rev)) {
1926 1960 return -1;
1927 1961 }
1928 1962 return rev >= -1 && rev < index_length(self);
1929 1963 }
1930 1964
1931 1965 if (node_check(value, &node) == -1)
1932 1966 return -1;
1933 1967
1934 1968 switch (index_find_node(self, node, 20)) {
1935 1969 case -3:
1936 1970 return -1;
1937 1971 case -2:
1938 1972 return 0;
1939 1973 default:
1940 1974 return 1;
1941 1975 }
1942 1976 }
1943 1977
1944 1978 typedef uint64_t bitmask;
1945 1979
1946 1980 /*
1947 1981 * Given a disjoint set of revs, return all candidates for the
1948 1982 * greatest common ancestor. In revset notation, this is the set
1949 1983 * "heads(::a and ::b and ...)"
1950 1984 */
1951 1985 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
1952 1986 int revcount)
1953 1987 {
1954 1988 const bitmask allseen = (1ull << revcount) - 1;
1955 1989 const bitmask poison = 1ull << revcount;
1956 1990 PyObject *gca = PyList_New(0);
1957 1991 int i, v, interesting;
1958 1992 int maxrev = -1;
1959 1993 bitmask sp;
1960 1994 bitmask *seen;
1961 1995
1962 1996 if (gca == NULL)
1963 1997 return PyErr_NoMemory();
1964 1998
1965 1999 for (i = 0; i < revcount; i++) {
1966 2000 if (revs[i] > maxrev)
1967 2001 maxrev = revs[i];
1968 2002 }
1969 2003
1970 2004 seen = calloc(sizeof(*seen), maxrev + 1);
1971 2005 if (seen == NULL) {
1972 2006 Py_DECREF(gca);
1973 2007 return PyErr_NoMemory();
1974 2008 }
1975 2009
1976 2010 for (i = 0; i < revcount; i++)
1977 2011 seen[revs[i]] = 1ull << i;
1978 2012
1979 2013 interesting = revcount;
1980 2014
1981 2015 for (v = maxrev; v >= 0 && interesting; v--) {
1982 2016 bitmask sv = seen[v];
1983 2017 int parents[2];
1984 2018
1985 2019 if (!sv)
1986 2020 continue;
1987 2021
1988 2022 if (sv < poison) {
1989 2023 interesting -= 1;
1990 2024 if (sv == allseen) {
1991 2025 PyObject *obj = PyInt_FromLong(v);
1992 2026 if (obj == NULL)
1993 2027 goto bail;
1994 2028 if (PyList_Append(gca, obj) == -1) {
1995 2029 Py_DECREF(obj);
1996 2030 goto bail;
1997 2031 }
1998 2032 sv |= poison;
1999 2033 for (i = 0; i < revcount; i++) {
2000 2034 if (revs[i] == v)
2001 2035 goto done;
2002 2036 }
2003 2037 }
2004 2038 }
2005 2039 if (index_get_parents(self, v, parents, maxrev) < 0)
2006 2040 goto bail;
2007 2041
2008 2042 for (i = 0; i < 2; i++) {
2009 2043 int p = parents[i];
2010 2044 if (p == -1)
2011 2045 continue;
2012 2046 sp = seen[p];
2013 2047 if (sv < poison) {
2014 2048 if (sp == 0) {
2015 2049 seen[p] = sv;
2016 2050 interesting++;
2017 2051 } else if (sp != sv)
2018 2052 seen[p] |= sv;
2019 2053 } else {
2020 2054 if (sp && sp < poison)
2021 2055 interesting--;
2022 2056 seen[p] = sv;
2023 2057 }
2024 2058 }
2025 2059 }
2026 2060
2027 2061 done:
2028 2062 free(seen);
2029 2063 return gca;
2030 2064 bail:
2031 2065 free(seen);
2032 2066 Py_XDECREF(gca);
2033 2067 return NULL;
2034 2068 }
2035 2069
/*
 * Given a disjoint set of revs, return the subset with the longest
 * path to the root.
 *
 * Each input rev is assigned one bit of a long bitmask.  seen[]
 * propagates those masks from each rev towards the root, depth[]
 * records the longest path found so far to each rev, and
 * interesting[] counts how many revs currently carry each distinct
 * mask combination.  Returns a new list of revs, or NULL on error.
 */
static PyObject *find_deepest(indexObject *self, PyObject *revs)
{
	const Py_ssize_t revcount = PyList_GET_SIZE(revs);
	/* one bit per input rev; bounded so the masks fit in a long */
	static const Py_ssize_t capacity = 24;
	int *depth, *interesting = NULL;
	int i, j, v, ninteresting;
	PyObject *dict = NULL, *keys = NULL;
	long *seen = NULL;
	int maxrev = -1;
	long final;

	if (revcount > capacity) {
		PyErr_Format(PyExc_OverflowError,
			     "bitset size (%ld) > capacity (%ld)",
			     (long)revcount, (long)capacity);
		return NULL;
	}

	for (i = 0; i < revcount; i++) {
		int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
		if (n > maxrev)
			maxrev = n;
	}

	depth = calloc(sizeof(*depth), maxrev + 1);
	if (depth == NULL)
		return PyErr_NoMemory();

	seen = calloc(sizeof(*seen), maxrev + 1);
	if (seen == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* one counter per possible mask combination */
	interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
	if (interesting == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	if (PyList_Sort(revs) == -1)
		goto bail;

	/* seed each input rev with its own bit and depth 1 */
	for (i = 0; i < revcount; i++) {
		int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
		long b = 1l << i;
		depth[n] = 1;
		seen[n] = b;
		interesting[b] = 1;
	}

	/* invariant: ninteresting is the number of non-zero entries in
	 * interesting. */
	ninteresting = (int)revcount;

	/* sweep from the highest rev towards the root, propagating masks
	 * to parents, until at most one mask combination remains */
	for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
		int dv = depth[v];
		int parents[2];
		long sv;

		if (dv == 0)
			continue;

		sv = seen[v];
		if (index_get_parents(self, v, parents, maxrev) < 0)
			goto bail;

		for (i = 0; i < 2; i++) {
			int p = parents[i];
			long sp;
			int dp;

			if (p == -1)
				continue;

			dp = depth[p];
			sp = seen[p];
			if (dp <= dv) {
				/* found a longer path to p: p inherits
				 * v's depth + 1 and v's mask */
				depth[p] = dv + 1;
				if (sp != sv) {
					interesting[sv] += 1;
					seen[p] = sv;
					if (sp) {
						interesting[sp] -= 1;
						if (interesting[sp] == 0)
							ninteresting -= 1;
					}
				}
			} else if (dv == dp - 1) {
				/* p is exactly one step deeper: merge
				 * v's mask into p's */
				long nsp = sp | sv;
				if (nsp == sp)
					continue;
				seen[p] = nsp;
				interesting[sp] -= 1;
				if (interesting[sp] == 0)
					ninteresting -= 1;
				if (interesting[nsp] == 0)
					ninteresting += 1;
				interesting[nsp] += 1;
			}
		}
		interesting[sv] -= 1;
		if (interesting[sv] == 0)
			ninteresting -= 1;
	}

	/* collect the union of the remaining interesting masks */
	final = 0;
	j = ninteresting;
	/* NOTE(review): interesting[] holds 1 << revcount entries but the
	 * bound here is 2 << revcount; this relies on j reaching 0 before
	 * i leaves the allocated range — confirm */
	for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
		if (interesting[i] == 0)
			continue;
		final |= i;
		j -= 1;
	}
	if (final == 0) {
		keys = PyList_New(0);
		goto bail;
	}

	/* use a dict as a set of the winning input revs */
	dict = PyDict_New();
	if (dict == NULL)
		goto bail;

	for (i = 0; i < revcount; i++) {
		PyObject *key;

		if ((final & (1 << i)) == 0)
			continue;

		key = PyList_GET_ITEM(revs, i);
		Py_INCREF(key);
		Py_INCREF(Py_None);
		if (PyDict_SetItem(dict, key, Py_None) == -1) {
			Py_DECREF(key);
			Py_DECREF(Py_None);
			goto bail;
		}
	}

	keys = PyDict_Keys(dict);

bail:
	free(depth);
	free(seen);
	free(interesting);
	Py_XDECREF(dict);

	return keys;
}
2189 2223
/*
 * Given a (possibly overlapping) set of revs, return all the
 * common ancestors heads: heads(::args[0] and ::a[1] and ...)
 *
 * Arguments must be ints.  Returns a new list of revs, or NULL on
 * error.  A nullrev (-1) argument short-circuits to an empty list.
 */
static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
{
	PyObject *ret = NULL;
	Py_ssize_t argcount, i, len;
	bitmask repeat = 0;
	int revcount = 0;
	int *revs;

	argcount = PySequence_Length(args);
	revs = PyMem_Malloc(argcount * sizeof(*revs));
	if (argcount > 0 && revs == NULL)
		return PyErr_NoMemory();
	len = index_length(self);

	for (i = 0; i < argcount; i++) {
		static const int capacity = 24;
		PyObject *obj = PySequence_GetItem(args, i);
		bitmask x;
		long val;

		if (!PyInt_Check(obj)) {
			PyErr_SetString(PyExc_TypeError,
					"arguments must all be ints");
			Py_DECREF(obj);
			goto bail;
		}
		val = PyInt_AsLong(obj);
		Py_DECREF(obj);
		if (val == -1) {
			/* nullrev is an ancestor of everything: the set of
			 * common ancestor heads is empty */
			ret = PyList_New(0);
			goto done;
		}
		if (val < 0 || val >= len) {
			PyErr_SetString(PyExc_IndexError, "index out of range");
			goto bail;
		}
		/* this cheesy bloom filter lets us avoid some more
		 * expensive duplicate checks in the common set-is-disjoint
		 * case */
		x = 1ull << (val & 0x3f);
		if (repeat & x) {
			int k;
			for (k = 0; k < revcount; k++) {
				if (val == revs[k])
					goto duplicate;
			}
		} else
			repeat |= x;
		if (revcount >= capacity) {
			PyErr_Format(PyExc_OverflowError,
				     "bitset size (%d) > capacity (%d)",
				     revcount, capacity);
			goto bail;
		}
		revs[revcount++] = (int)val;
	duplicate:;
	}

	/* trivial cases: no revs, or a single rev is its own answer */
	if (revcount == 0) {
		ret = PyList_New(0);
		goto done;
	}
	if (revcount == 1) {
		PyObject *obj;
		ret = PyList_New(1);
		if (ret == NULL)
			goto bail;
		obj = PyInt_FromLong(revs[0]);
		if (obj == NULL)
			goto bail;
		PyList_SET_ITEM(ret, 0, obj);
		goto done;
	}

	ret = find_gca_candidates(self, revs, revcount);
	if (ret == NULL)
		goto bail;

done:
	PyMem_Free(revs);
	return ret;

bail:
	PyMem_Free(revs);
	Py_XDECREF(ret);
	return NULL;
}
2281 2315
2282 2316 /*
2283 2317 * Given a (possibly overlapping) set of revs, return the greatest
2284 2318 * common ancestors: those with the longest path to the root.
2285 2319 */
2286 2320 static PyObject *index_ancestors(indexObject *self, PyObject *args)
2287 2321 {
2288 2322 PyObject *ret;
2289 2323 PyObject *gca = index_commonancestorsheads(self, args);
2290 2324 if (gca == NULL)
2291 2325 return NULL;
2292 2326
2293 2327 if (PyList_GET_SIZE(gca) <= 1) {
2294 2328 return gca;
2295 2329 }
2296 2330
2297 2331 ret = find_deepest(self, gca);
2298 2332 Py_DECREF(gca);
2299 2333 return ret;
2300 2334 }
2301 2335
2302 2336 /*
2303 2337 * Invalidate any trie entries introduced by added revs.
2304 2338 */
2305 2339 static void index_invalidate_added(indexObject *self, Py_ssize_t start)
2306 2340 {
2307 2341 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
2308 2342
2309 2343 for (i = start; i < len; i++) {
2310 2344 PyObject *tuple = PyList_GET_ITEM(self->added, i);
2311 2345 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
2312 2346
2313 2347 nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
2314 2348 }
2315 2349
2316 2350 if (start == 0)
2317 2351 Py_CLEAR(self->added);
2318 2352 }
2319 2353
/*
 * Delete a numeric range of revs, which must be at the end of the
 * range, but exclude the sentinel nullid entry.
 *
 * Returns 0 on success, -1 with an exception set otherwise.  Only
 * step-1 slices reaching the end of the index are supported.
 */
static int index_slice_del(indexObject *self, PyObject *item)
{
	Py_ssize_t start, stop, step, slicelength;
	/* +1 accounts for the sentinel nullid entry */
	Py_ssize_t length = index_length(self) + 1;
	int ret = 0;

	/* Argument changed from PySliceObject* to PyObject* in Python 3. */
#ifdef IS_PY3K
	if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
				 &slicelength) < 0)
#else
	if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
				 &step, &slicelength) < 0)
#endif
		return -1;

	if (slicelength <= 0)
		return 0;

	if ((step < 0 && start < stop) || (step > 0 && start > stop))
		stop = start;

	/* normalize a negative-step slice to an equivalent forward one */
	if (step < 0) {
		stop = start + 1;
		start = stop + step * (slicelength - 1) - 1;
		step = -step;
	}

	if (step != 1) {
		PyErr_SetString(PyExc_ValueError,
				"revlog index delete requires step size of 1");
		return -1;
	}

	if (stop != length - 1) {
		PyErr_SetString(PyExc_IndexError,
				"revlog index deletion indices are invalid");
		return -1;
	}

	if (start < self->length) {
		/* the deletion reaches into the on-disk portion */
		if (self->ntinitialized) {
			Py_ssize_t i;

			/* drop the deleted nodes from the node->rev trie */
			for (i = start + 1; i < self->length; i++) {
				const char *node = index_node_existing(self, i);
				if (node == NULL)
					return -1;

				nt_delete_node(&self->nt, node);
			}
			if (self->added)
				index_invalidate_added(self, 0);
			if (self->ntrev > start)
				self->ntrev = (int)start;
		}
		self->length = start;
		if (start < self->raw_length) {
			/* invalidate cached entry tuples past the cut */
			if (self->cache) {
				Py_ssize_t i;
				for (i = start; i < self->raw_length; i++)
					Py_CLEAR(self->cache[i]);
			}
			self->raw_length = start;
		}
		goto done;
	}

	/* the deletion only affects revs appended in memory */
	if (self->ntinitialized) {
		index_invalidate_added(self, start - self->length);
		if (self->ntrev > start)
			self->ntrev = (int)start;
	}
	if (self->added)
		ret = PyList_SetSlice(self->added, start - self->length,
				      PyList_GET_SIZE(self->added), NULL);
done:
	Py_CLEAR(self->headrevs);
	return ret;
}
2404 2438
2405 2439 /*
2406 2440 * Supported ops:
2407 2441 *
2408 2442 * slice deletion
2409 2443 * string assignment (extend node->rev mapping)
2410 2444 * string deletion (shrink node->rev mapping)
2411 2445 */
2412 2446 static int index_assign_subscript(indexObject *self, PyObject *item,
2413 2447 PyObject *value)
2414 2448 {
2415 2449 char *node;
2416 2450 long rev;
2417 2451
2418 2452 if (PySlice_Check(item) && value == NULL)
2419 2453 return index_slice_del(self, item);
2420 2454
2421 2455 if (node_check(item, &node) == -1)
2422 2456 return -1;
2423 2457
2424 2458 if (value == NULL)
2425 2459 return self->ntinitialized ? nt_delete_node(&self->nt, node)
2426 2460 : 0;
2427 2461 rev = PyInt_AsLong(value);
2428 2462 if (rev > INT_MAX || rev < 0) {
2429 2463 if (!PyErr_Occurred())
2430 2464 PyErr_SetString(PyExc_ValueError, "rev out of range");
2431 2465 return -1;
2432 2466 }
2433 2467
2434 2468 if (index_init_nt(self) == -1)
2435 2469 return -1;
2436 2470 return nt_insert(&self->nt, node, (int)rev);
2437 2471 }
2438 2472
2439 2473 /*
2440 2474 * Find all RevlogNG entries in an index that has inline data. Update
2441 2475 * the optional "offsets" table with those entries.
2442 2476 */
2443 2477 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
2444 2478 {
2445 2479 const char *data = (const char *)self->buf.buf;
2446 2480 Py_ssize_t pos = 0;
2447 2481 Py_ssize_t end = self->buf.len;
2448 2482 long incr = v1_hdrsize;
2449 2483 Py_ssize_t len = 0;
2450 2484
2451 2485 while (pos + v1_hdrsize <= end && pos >= 0) {
2452 2486 uint32_t comp_len;
2453 2487 /* 3rd element of header is length of compressed inline data */
2454 2488 comp_len = getbe32(data + pos + 8);
2455 2489 incr = v1_hdrsize + comp_len;
2456 2490 if (offsets)
2457 2491 offsets[len] = data + pos;
2458 2492 len++;
2459 2493 pos += incr;
2460 2494 }
2461 2495
2462 2496 if (pos != end) {
2463 2497 if (!PyErr_Occurred())
2464 2498 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2465 2499 return -1;
2466 2500 }
2467 2501
2468 2502 return len;
2469 2503 }
2470 2504
/*
 * tp_init for the index type: parse "(data, inlined)" and set up the
 * object.  Returns 0 on success, -1 with an exception set otherwise.
 */
static int index_init(indexObject *self, PyObject *args)
{
	PyObject *data_obj, *inlined_obj;
	Py_ssize_t size;

	/* Initialize before argument-checking to avoid index_dealloc() crash.
	 */
	self->raw_length = 0;
	self->added = NULL;
	self->cache = NULL;
	self->data = NULL;
	memset(&self->buf, 0, sizeof(self->buf));
	self->headrevs = NULL;
	self->filteredrevs = Py_None;
	Py_INCREF(Py_None);
	self->ntinitialized = 0;
	self->offsets = NULL;

	if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
		return -1;
	if (!PyObject_CheckBuffer(data_obj)) {
		PyErr_SetString(PyExc_TypeError,
				"data does not support buffer interface");
		return -1;
	}

	if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
		return -1;
	size = self->buf.len;

	self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
	self->data = data_obj;

	self->ntlookups = self->ntmisses = 0;
	self->ntrev = -1;
	Py_INCREF(self->data);

	if (self->inlined) {
		/* inline data: record boundaries must be scanned */
		Py_ssize_t len = inline_scan(self, NULL);
		if (len == -1)
			goto bail;
		self->raw_length = len;
		self->length = len;
	} else {
		/* separate data file: the index is fixed-size records */
		if (size % v1_hdrsize) {
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
			goto bail;
		}
		self->raw_length = size / v1_hdrsize;
		self->length = self->raw_length;
	}

	return 0;
bail:
	/* partial state is released by index_dealloc() */
	return -1;
}
2527 2561
2528 2562 static PyObject *index_nodemap(indexObject *self)
2529 2563 {
2530 2564 Py_INCREF(self);
2531 2565 return (PyObject *)self;
2532 2566 }
2533 2567
2534 2568 static void _index_clearcaches(indexObject *self)
2535 2569 {
2536 2570 if (self->cache) {
2537 2571 Py_ssize_t i;
2538 2572
2539 2573 for (i = 0; i < self->raw_length; i++)
2540 2574 Py_CLEAR(self->cache[i]);
2541 2575 free(self->cache);
2542 2576 self->cache = NULL;
2543 2577 }
2544 2578 if (self->offsets) {
2545 2579 PyMem_Free((void *)self->offsets);
2546 2580 self->offsets = NULL;
2547 2581 }
2548 2582 if (self->ntinitialized) {
2549 2583 nt_dealloc(&self->nt);
2550 2584 }
2551 2585 self->ntinitialized = 0;
2552 2586 Py_CLEAR(self->headrevs);
2553 2587 }
2554 2588
2555 2589 static PyObject *index_clearcaches(indexObject *self)
2556 2590 {
2557 2591 _index_clearcaches(self);
2558 2592 self->ntrev = -1;
2559 2593 self->ntlookups = self->ntmisses = 0;
2560 2594 Py_RETURN_NONE;
2561 2595 }
2562 2596
/* tp_dealloc: caches must go before the underlying buffer/data they
 * point into. */
static void index_dealloc(indexObject *self)
{
	_index_clearcaches(self);
	Py_XDECREF(self->filteredrevs);
	if (self->buf.buf) {
		PyBuffer_Release(&self->buf);
		memset(&self->buf, 0, sizeof(self->buf));
	}
	Py_XDECREF(self->data);
	Py_XDECREF(self->added);
	PyObject_Del(self);
}
2575 2609
/* sequence protocol: len(index), index[rev], rev-or-node in index */
static PySequenceMethods index_sequence_methods = {
    (lenfunc)index_length,      /* sq_length */
    0,                          /* sq_concat */
    0,                          /* sq_repeat */
    (ssizeargfunc)index_get,    /* sq_item */
    0,                          /* sq_slice */
    0,                          /* sq_ass_item */
    0,                          /* sq_ass_slice */
    (objobjproc)index_contains, /* sq_contains */
};
2586 2620
/* mapping protocol: node lookup plus slice deletion / nt updates */
static PyMappingMethods index_mapping_methods = {
    (lenfunc)index_length,                 /* mp_length */
    (binaryfunc)index_getitem,             /* mp_subscript */
    (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
};
2592 2626
2593 2627 static PyMethodDef index_methods[] = {
2594 2628 {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
2595 2629 "return the gca set of the given revs"},
2596 2630 {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
2597 2631 METH_VARARGS,
2598 2632 "return the heads of the common ancestors of the given revs"},
2599 2633 {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
2600 2634 "clear the index caches"},
2601 2635 {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
2602 2636 {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
2603 2637 "compute phases"},
2604 2638 {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
2605 2639 "reachableroots"},
2606 2640 {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
2607 2641 "get head revisions"}, /* Can do filtering since 3.2 */
2608 2642 {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
2609 2643 "get filtered head revisions"}, /* Can always do filtering */
2610 2644 {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
2611 2645 "determine revisions with deltas to reconstruct fulltext"},
2612 2646 {"slicechunktodensity", (PyCFunction)index_slicechunktodensity,
2613 2647 METH_VARARGS, "determine revisions with deltas to reconstruct fulltext"},
2614 2648 {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
2615 2649 {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
2616 2650 "match a potentially ambiguous node ID"},
2617 2651 {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
2618 2652 "find length of shortest hex nodeid of a binary ID"},
2619 2653 {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
2620 2654 {NULL} /* Sentinel */
2621 2655 };
2622 2656
/* attribute getters: "nodemap" returns the index itself */
static PyGetSetDef index_getset[] = {
    {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
    {NULL} /* Sentinel */
};
2627 2661
/* Type object for parsers.index; exported so other C modules (and the
 * Rust bridge below) can type-check index instances. */
PyTypeObject HgRevlogIndex_Type = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.index",               /* tp_name */
    sizeof(indexObject),           /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)index_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    &index_sequence_methods,       /* tp_as_sequence */
    &index_mapping_methods,        /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "revlog index",                /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    index_methods,                 /* tp_methods */
    0,                             /* tp_members */
    index_getset,                  /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)index_init,          /* tp_init */
    0,                             /* tp_alloc */
};
2667 2701
/*
 * returns a tuple of the form (index, cache) with elements as
 * follows:
 *
 * index: an index object that lazily parses RevlogNG records
 * cache: if data is inlined, a tuple (0, index_file_content), else None
 *        index_file_content could be a string, or a buffer
 *
 * added complications are for backwards compatibility
 */
PyObject *parse_index2(PyObject *self, PyObject *args)
{
	PyObject *tuple = NULL, *cache = NULL;
	indexObject *idx;
	int ret;

	idx = PyObject_New(indexObject, &HgRevlogIndex_Type);
	if (idx == NULL)
		goto bail;

	ret = index_init(idx, args);
	if (ret == -1)
		goto bail;

	if (idx->inlined) {
		cache = Py_BuildValue("iO", 0, idx->data);
		if (cache == NULL)
			goto bail;
	} else {
		cache = Py_None;
		Py_INCREF(cache);
	}

	/* "NN" steals both references, so no DECREF on success */
	tuple = Py_BuildValue("NN", idx, cache);
	if (!tuple)
		goto bail;
	return tuple;

bail:
	Py_XDECREF(idx);
	Py_XDECREF(cache);
	Py_XDECREF(tuple);
	return NULL;
}
2712 2746
#ifdef WITH_RUST

/* rustlazyancestors: iteration over ancestors implemented in Rust
 *
 * This class holds a reference to an index and to the Rust iterator.
 */
typedef struct rustlazyancestorsObjectStruct rustlazyancestorsObject;

struct rustlazyancestorsObjectStruct {
	PyObject_HEAD
	    /* Type-specific fields go here. */
	    indexObject *index; /* Ref kept to avoid GC'ing the index */
	void *iter;             /* Rust iterator */
};

/* FFI exposed from Rust code; the iterator handle is opaque on the C
 * side and must be released with rustlazyancestors_drop(). */
rustlazyancestorsObject *rustlazyancestors_init(indexObject *index,
                                                /* intrevs vector */
                                                Py_ssize_t initrevslen,
                                                long *initrevs, long stoprev,
                                                int inclusive);
void rustlazyancestors_drop(rustlazyancestorsObject *self);
int rustlazyancestors_next(rustlazyancestorsObject *self);
int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev);
2737 2771
2738 2772 /* CPython instance methods */
2739 2773 static int rustla_init(rustlazyancestorsObject *self, PyObject *args)
2740 2774 {
2741 2775 PyObject *initrevsarg = NULL;
2742 2776 PyObject *inclusivearg = NULL;
2743 2777 long stoprev = 0;
2744 2778 long *initrevs = NULL;
2745 2779 int inclusive = 0;
2746 2780 Py_ssize_t i;
2747 2781
2748 2782 indexObject *index;
2749 2783 if (!PyArg_ParseTuple(args, "O!O!lO!", &HgRevlogIndex_Type, &index,
2750 2784 &PyList_Type, &initrevsarg, &stoprev,
2751 2785 &PyBool_Type, &inclusivearg))
2752 2786 return -1;
2753 2787
2754 2788 Py_INCREF(index);
2755 2789 self->index = index;
2756 2790
2757 2791 if (inclusivearg == Py_True)
2758 2792 inclusive = 1;
2759 2793
2760 2794 Py_ssize_t linit = PyList_GET_SIZE(initrevsarg);
2761 2795
2762 2796 initrevs = (long *)calloc(linit, sizeof(long));
2763 2797
2764 2798 if (initrevs == NULL) {
2765 2799 PyErr_NoMemory();
2766 2800 goto bail;
2767 2801 }
2768 2802
2769 2803 for (i = 0; i < linit; i++) {
2770 2804 initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i));
2771 2805 }
2772 2806 if (PyErr_Occurred())
2773 2807 goto bail;
2774 2808
2775 2809 self->iter =
2776 2810 rustlazyancestors_init(index, linit, initrevs, stoprev, inclusive);
2777 2811 if (self->iter == NULL) {
2778 2812 /* if this is because of GraphError::ParentOutOfRange
2779 2813 * HgRevlogIndex_GetParents() has already set the proper
2780 2814 * exception */
2781 2815 goto bail;
2782 2816 }
2783 2817
2784 2818 free(initrevs);
2785 2819 return 0;
2786 2820
2787 2821 bail:
2788 2822 free(initrevs);
2789 2823 return -1;
2790 2824 };
2791 2825
/* tp_dealloc: release the index reference and the Rust iterator. */
static void rustla_dealloc(rustlazyancestorsObject *self)
{
	Py_XDECREF(self->index);
	if (self->iter != NULL) { /* can happen if rustla_init failed */
		rustlazyancestors_drop(self->iter);
	}
	PyObject_Del(self);
}
2800 2834
/* tp_iternext: yield the next ancestor rev; NULL ends the iteration
 * (rustlazyancestors_next returns -1 on exhaustion). */
static PyObject *rustla_next(rustlazyancestorsObject *self)
{
	int res = rustlazyancestors_next(self->iter);
	if (res == -1) {
		/* Setting an explicit exception seems unnecessary
		 * as examples from Python source code (Objects/rangeobjets.c
		 * and Modules/_io/stringio.c) seem to demonstrate.
		 */
		return NULL;
	}
	return PyInt_FromLong(res);
}
2813 2847
2814 2848 static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev)
2815 2849 {
2816 2850 long lrev;
2817 2851 if (!pylong_to_long(rev, &lrev)) {
2818 2852 PyErr_Clear();
2819 2853 return 0;
2820 2854 }
2821 2855 return rustlazyancestors_contains(self->iter, lrev);
2822 2856 }
2823 2857
/* sequence protocol: only membership testing is supported */
static PySequenceMethods rustla_sequence_methods = {
    0,                           /* sq_length */
    0,                           /* sq_concat */
    0,                           /* sq_repeat */
    0,                           /* sq_item */
    0,                           /* sq_slice */
    0,                           /* sq_ass_item */
    0,                           /* sq_ass_slice */
    (objobjproc)rustla_contains, /* sq_contains */
};
2834 2868
/* Type object for parsers.rustlazyancestors. */
static PyTypeObject rustlazyancestorsType = {
    PyVarObject_HEAD_INIT(NULL, 0)                  /* header */
    "parsers.rustlazyancestors",                    /* tp_name */
    sizeof(rustlazyancestorsObject),                /* tp_basicsize */
    0,                                              /* tp_itemsize */
    (destructor)rustla_dealloc,                     /* tp_dealloc */
    0,                                              /* tp_print */
    0,                                              /* tp_getattr */
    0,                                              /* tp_setattr */
    0,                                              /* tp_compare */
    0,                                              /* tp_repr */
    0,                                              /* tp_as_number */
    &rustla_sequence_methods,                       /* tp_as_sequence */
    0,                                              /* tp_as_mapping */
    0,                                              /* tp_hash */
    0,                                              /* tp_call */
    0,                                              /* tp_str */
    0,                                              /* tp_getattro */
    0,                                              /* tp_setattro */
    0,                                              /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,                             /* tp_flags */
    "Iterator over ancestors, implemented in Rust", /* tp_doc */
    0,                                              /* tp_traverse */
    0,                                              /* tp_clear */
    0,                                              /* tp_richcompare */
    0,                                              /* tp_weaklistoffset */
    0,                                              /* tp_iter */
    (iternextfunc)rustla_next,                      /* tp_iternext */
    0,                                              /* tp_methods */
    0,                                              /* tp_members */
    0,                                              /* tp_getset */
    0,                                              /* tp_base */
    0,                                              /* tp_dict */
    0,                                              /* tp_descr_get */
    0,                                              /* tp_descr_set */
    0,                                              /* tp_dictoffset */
    (initproc)rustla_init,                          /* tp_init */
    0,                                              /* tp_alloc */
};
2874 2908 #endif /* WITH_RUST */
2875 2909
/*
 * Register the revlog types and the C-API capsule on the parsers
 * module.  Errors during registration leave the module partially
 * initialized (callers detect this via the missing attributes).
 */
void revlog_module_init(PyObject *mod)
{
	PyObject *caps = NULL;
	HgRevlogIndex_Type.tp_new = PyType_GenericNew;
	if (PyType_Ready(&HgRevlogIndex_Type) < 0)
		return;
	Py_INCREF(&HgRevlogIndex_Type);
	/* NOTE(review): PyModule_AddObject return values are unchecked
	 * here and below — on failure the INCREF'd reference leaks */
	PyModule_AddObject(mod, "index", (PyObject *)&HgRevlogIndex_Type);

	nodetreeType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&nodetreeType) < 0)
		return;
	Py_INCREF(&nodetreeType);
	PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);

	/* shared sentinel tuple for the nullid entry */
	if (!nullentry) {
		nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0,
		                          0, -1, -1, -1, -1, nullid, 20);
	}
	if (nullentry)
		PyObject_GC_UnTrack(nullentry);

	/* expose index_get_parents to other C extensions via a capsule */
	caps = PyCapsule_New(HgRevlogIndex_GetParents,
	                     "mercurial.cext.parsers.index_get_parents_CAPI",
	                     NULL);
	if (caps != NULL)
		PyModule_AddObject(mod, "index_get_parents_CAPI", caps);

#ifdef WITH_RUST
	rustlazyancestorsType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&rustlazyancestorsType) < 0)
		return;
	Py_INCREF(&rustlazyancestorsType);
	PyModule_AddObject(mod, "rustlazyancestors",
	                   (PyObject *)&rustlazyancestorsType);
#endif
}
General Comments 0
You need to be logged in to leave comments. Login now