revlog: delete references to deleted nullid sentinel value...
Martin von Zweigbergk
r43981:ae5e3951 default
@@ -1,3070 +1,3069 @@
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #define PY_SSIZE_T_CLEAN
11 11 #include <Python.h>
12 12 #include <assert.h>
13 13 #include <ctype.h>
14 14 #include <limits.h>
15 15 #include <stddef.h>
16 16 #include <stdlib.h>
17 17 #include <string.h>
18 18
19 19 #include "bitmanipulation.h"
20 20 #include "charencode.h"
21 21 #include "revlog.h"
22 22 #include "util.h"
23 23
24 24 #ifdef IS_PY3K
25 25 /* The mapping of Python types is meant to be temporary to get Python
26 26 * 3 to compile. We should remove this once Python 3 is fully
27 27 * supported and proper types are used in the extensions themselves. */
28 28 #define PyInt_Check PyLong_Check
29 29 #define PyInt_FromLong PyLong_FromLong
30 30 #define PyInt_FromSsize_t PyLong_FromSsize_t
31 31 #define PyInt_AsLong PyLong_AsLong
32 32 #endif
33 33
34 34 typedef struct indexObjectStruct indexObject;
35 35
36 36 typedef struct {
37 37 int children[16];
38 38 } nodetreenode;
39 39
40 40 /*
41 41 * A base-16 trie for fast node->rev mapping.
42 42 *
43 43 * Positive value is index of the next node in the trie
44 44 * Negative value is a leaf: -(rev + 2)
45 45 * Zero is empty
46 46 */
47 47 typedef struct {
48 48 indexObject *index;
49 49 nodetreenode *nodes;
50 50 unsigned length; /* # nodes in use */
51 51 unsigned capacity; /* # nodes allocated */
52 52 int depth; /* maximum depth of tree */
53 53 int splits; /* # splits performed */
54 54 } nodetree;
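
A minimal standalone sketch (not from parsers.c) of the leaf encoding described in the comment above: a leaf stores -(rev + 2), so every stored revision becomes a strictly negative value and 0 remains free to mean an empty slot.

#include <assert.h>

/* A leaf stores -(rev + 2): rev 0 -> -2, rev 1 -> -3, ... so the value 0
 * stays free to mean "empty slot", and nullrev (-1) encodes to -1. */
static int leaf_encode(int rev)
{
        return -rev - 2;
}

static int leaf_decode(int v)
{
        return -(v + 2);
}

int main(void)
{
        assert(leaf_encode(0) == -2);
        assert(leaf_decode(-2) == 0);
        assert(leaf_encode(-1) == -1); /* nullrev is representable too */
        assert(leaf_decode(leaf_encode(12345)) == 12345);
        return 0;
}
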
55 55
56 56 typedef struct {
57 57 PyObject_HEAD /* ; */
58 58 nodetree nt;
59 59 } nodetreeObject;
60 60
61 61 /*
62 62 * This class has two behaviors.
63 63 *
64 64 * When used in a list-like way (with integer keys), we decode an
65 * entry in a RevlogNG index file on demand. Our last entry is a
66 * sentinel, always a nullid. We have limited support for
65 * entry in a RevlogNG index file on demand. We have limited support for
67 66 * integer-keyed insert and delete, only at elements right before the
68 * sentinel.
67 * end.
69 68 *
70 69 * With string keys, we lazily perform a reverse mapping from node to
71 70 * rev, using a base-16 trie.
72 71 */
73 72 struct indexObjectStruct {
74 73 PyObject_HEAD
75 74 /* Type-specific fields go here. */
76 75 PyObject *data; /* raw bytes of index */
77 76 Py_buffer buf; /* buffer of data */
78 77 PyObject **cache; /* cached tuples */
79 78 const char **offsets; /* populated on demand */
80 79 Py_ssize_t raw_length; /* original number of elements */
81 80 Py_ssize_t length; /* current number of elements */
82 81 PyObject *added; /* populated on demand */
83 82 PyObject *headrevs; /* cache, invalidated on changes */
84 83 PyObject *filteredrevs; /* filtered revs set */
85 84 nodetree nt; /* base-16 trie */
86 85 int ntinitialized; /* 0 or 1 */
87 86 int ntrev; /* last rev scanned */
88 87 int ntlookups; /* # lookups */
89 88 int ntmisses; /* # lookups that miss the cache */
90 89 int inlined;
91 90 };
92 91
93 92 static Py_ssize_t index_length(const indexObject *self)
94 93 {
95 94 if (self->added == NULL)
96 95 return self->length;
97 96 return self->length + PyList_GET_SIZE(self->added);
98 97 }
99 98
100 99 static PyObject *nullentry = NULL;
101 100 static const char nullid[20] = {0};
102 101 static const Py_ssize_t nullrev = -1;
103 102
104 103 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
105 104
106 105 #if LONG_MAX == 0x7fffffffL
107 106 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
108 107 #else
109 108 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
110 109 #endif
111 110
112 111 /* A RevlogNG v1 index entry is 64 bytes long. */
113 112 static const long v1_hdrsize = 64;
114 113
115 114 static void raise_revlog_error(void)
116 115 {
117 116 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
118 117
119 118 mod = PyImport_ImportModule("mercurial.error");
120 119 if (mod == NULL) {
121 120 goto cleanup;
122 121 }
123 122
124 123 dict = PyModule_GetDict(mod);
125 124 if (dict == NULL) {
126 125 goto cleanup;
127 126 }
128 127 Py_INCREF(dict);
129 128
130 129 errclass = PyDict_GetItemString(dict, "RevlogError");
131 130 if (errclass == NULL) {
132 131 PyErr_SetString(PyExc_SystemError,
133 132 "could not find RevlogError");
134 133 goto cleanup;
135 134 }
136 135
137 136 /* value of exception is ignored by callers */
138 137 PyErr_SetString(errclass, "RevlogError");
139 138
140 139 cleanup:
141 140 Py_XDECREF(dict);
142 141 Py_XDECREF(mod);
143 142 }
144 143
145 144 /*
146 145 * Return a pointer to the beginning of a RevlogNG record.
147 146 */
148 147 static const char *index_deref(indexObject *self, Py_ssize_t pos)
149 148 {
150 149 if (self->inlined && pos > 0) {
151 150 if (self->offsets == NULL) {
152 151 self->offsets = PyMem_Malloc(self->raw_length *
153 152 sizeof(*self->offsets));
154 153 if (self->offsets == NULL)
155 154 return (const char *)PyErr_NoMemory();
156 155 inline_scan(self, self->offsets);
157 156 }
158 157 return self->offsets[pos];
159 158 }
160 159
161 160 return (const char *)(self->buf.buf) + pos * v1_hdrsize;
162 161 }
163 162
164 163 /*
165 164 * Get parents of the given rev.
166 165 *
167 166 * The specified rev must be valid and must not be nullrev. A returned
168 167 * parent revision may be nullrev, but is guaranteed to be in valid range.
169 168 */
170 169 static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
171 170 int maxrev)
172 171 {
173 172 if (rev >= self->length) {
174 173 long tmp;
175 174 PyObject *tuple =
176 175 PyList_GET_ITEM(self->added, rev - self->length);
177 176 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 5), &tmp)) {
178 177 return -1;
179 178 }
180 179 ps[0] = (int)tmp;
181 180 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 6), &tmp)) {
182 181 return -1;
183 182 }
184 183 ps[1] = (int)tmp;
185 184 } else {
186 185 const char *data = index_deref(self, rev);
187 186 ps[0] = getbe32(data + 24);
188 187 ps[1] = getbe32(data + 28);
189 188 }
190 189 /* If index file is corrupted, ps[] may point to invalid revisions. So
191 190 * trusting them unconditionally would risk a buffer overflow. */
192 191 if (ps[0] < -1 || ps[0] > maxrev || ps[1] < -1 || ps[1] > maxrev) {
193 192 PyErr_SetString(PyExc_ValueError, "parent out of range");
194 193 return -1;
195 194 }
196 195 return 0;
197 196 }
198 197
199 198 /*
200 199 * Get parents of the given rev.
201 200 *
202 201 * If the specified rev is out of range, IndexError will be raised. If the
203 202 * revlog entry is corrupted, ValueError may be raised.
204 203 *
205 204 * Returns 0 on success or -1 on failure.
206 205 */
207 206 int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps)
208 207 {
209 208 int tiprev;
210 209 if (!op || !HgRevlogIndex_Check(op) || !ps) {
211 210 PyErr_BadInternalCall();
212 211 return -1;
213 212 }
214 213 tiprev = (int)index_length((indexObject *)op) - 1;
215 214 if (rev < -1 || rev > tiprev) {
216 215 PyErr_Format(PyExc_IndexError, "rev out of range: %d", rev);
217 216 return -1;
218 217 } else if (rev == -1) {
219 218 ps[0] = ps[1] = -1;
220 219 return 0;
221 220 } else {
222 221 return index_get_parents((indexObject *)op, rev, ps, tiprev);
223 222 }
224 223 }
225 224
226 225 static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
227 226 {
228 227 uint64_t offset;
229 228 if (rev == nullrev) {
230 229 return 0;
231 230 }
232 231 if (rev >= self->length) {
233 232 PyObject *tuple;
234 233 PyObject *pylong;
235 234 PY_LONG_LONG tmp;
236 235 tuple = PyList_GET_ITEM(self->added, rev - self->length);
237 236 pylong = PyTuple_GET_ITEM(tuple, 0);
238 237 tmp = PyLong_AsLongLong(pylong);
239 238 if (tmp == -1 && PyErr_Occurred()) {
240 239 return -1;
241 240 }
242 241 if (tmp < 0) {
243 242 PyErr_Format(PyExc_OverflowError,
244 243 "revlog entry size out of bound (%lld)",
245 244 (long long)tmp);
246 245 return -1;
247 246 }
248 247 offset = (uint64_t)tmp;
249 248 } else {
250 249 const char *data = index_deref(self, rev);
251 250 offset = getbe32(data + 4);
252 251 if (rev == 0) {
253 252 /* mask out version number for the first entry */
254 253 offset &= 0xFFFF;
255 254 } else {
256 255 uint32_t offset_high = getbe32(data);
257 256 offset |= ((uint64_t)offset_high) << 32;
258 257 }
259 258 }
260 259 return (int64_t)(offset >> 16);
261 260 }
262 261
263 262 static inline int index_get_length(indexObject *self, Py_ssize_t rev)
264 263 {
265 264 if (rev == nullrev) {
266 265 return 0;
267 266 }
268 267 if (rev >= self->length) {
269 268 PyObject *tuple;
270 269 PyObject *pylong;
271 270 long ret;
272 271 tuple = PyList_GET_ITEM(self->added, rev - self->length);
273 272 pylong = PyTuple_GET_ITEM(tuple, 1);
274 273 ret = PyInt_AsLong(pylong);
275 274 if (ret == -1 && PyErr_Occurred()) {
276 275 return -1;
277 276 }
278 277 if (ret < 0 || ret > (long)INT_MAX) {
279 278 PyErr_Format(PyExc_OverflowError,
280 279 "revlog entry size out of bound (%ld)",
281 280 ret);
282 281 return -1;
283 282 }
284 283 return (int)ret;
285 284 } else {
286 285 const char *data = index_deref(self, rev);
287 286 int tmp = (int)getbe32(data + 8);
288 287 if (tmp < 0) {
289 288 PyErr_Format(PyExc_OverflowError,
290 289 "revlog entry size out of bound (%d)",
291 290 tmp);
292 291 return -1;
293 292 }
294 293 return tmp;
295 294 }
296 295 }
297 296
298 297 /*
299 298 * RevlogNG format (all in big endian, data may be inlined):
300 299 * 6 bytes: offset
301 300 * 2 bytes: flags
302 301 * 4 bytes: compressed length
303 302 * 4 bytes: uncompressed length
304 303 * 4 bytes: base revision
305 304 * 4 bytes: link revision
306 305 * 4 bytes: parent 1 revision
307 306 * 4 bytes: parent 2 revision
308 307 * 32 bytes: nodeid (only 20 bytes used)
309 308 */
310 309 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
311 310 {
312 311 uint64_t offset_flags;
313 312 int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
314 313 const char *c_node_id;
315 314 const char *data;
316 315 Py_ssize_t length = index_length(self);
317 316 PyObject *entry;
318 317
319 318 if (pos == nullrev) {
320 319 Py_INCREF(nullentry);
321 320 return nullentry;
322 321 }
323 322
324 323 if (pos < 0 || pos >= length) {
325 324 PyErr_SetString(PyExc_IndexError, "revlog index out of range");
326 325 return NULL;
327 326 }
328 327
329 328 if (pos >= self->length) {
330 329 PyObject *obj;
331 330 obj = PyList_GET_ITEM(self->added, pos - self->length);
332 331 Py_INCREF(obj);
333 332 return obj;
334 333 }
335 334
336 335 if (self->cache) {
337 336 if (self->cache[pos]) {
338 337 Py_INCREF(self->cache[pos]);
339 338 return self->cache[pos];
340 339 }
341 340 } else {
342 341 self->cache = calloc(self->raw_length, sizeof(PyObject *));
343 342 if (self->cache == NULL)
344 343 return PyErr_NoMemory();
345 344 }
346 345
347 346 data = index_deref(self, pos);
348 347 if (data == NULL)
349 348 return NULL;
350 349
351 350 offset_flags = getbe32(data + 4);
352 351 if (pos == 0) /* mask out version number for the first entry */
353 352 offset_flags &= 0xFFFF;
354 353 else {
355 354 uint32_t offset_high = getbe32(data);
356 355 offset_flags |= ((uint64_t)offset_high) << 32;
357 356 }
358 357
359 358 comp_len = getbe32(data + 8);
360 359 uncomp_len = getbe32(data + 12);
361 360 base_rev = getbe32(data + 16);
362 361 link_rev = getbe32(data + 20);
363 362 parent_1 = getbe32(data + 24);
364 363 parent_2 = getbe32(data + 28);
365 364 c_node_id = data + 32;
366 365
367 366 entry = Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
368 367 base_rev, link_rev, parent_1, parent_2, c_node_id,
369 368 (Py_ssize_t)20);
370 369
371 370 if (entry) {
372 371 PyObject_GC_UnTrack(entry);
373 372 Py_INCREF(entry);
374 373 }
375 374
376 375 self->cache[pos] = entry;
377 376
378 377 return entry;
379 378 }
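
The following standalone sketch (not from parsers.c) shows how the 64-byte v1 entry layout documented above maps onto the reads done in index_get(); the read_be32() helper here is only a stand-in for the getbe32() provided by bitmanipulation.h, and the version masking mirrors the pos == 0 branch.

#include <stdint.h>
#include <string.h>

/* Illustrative only: decode one 64-byte RevlogNG v1 index entry. */
static uint32_t read_be32(const unsigned char *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

struct revlog_entry {
        uint64_t offset_flags; /* 6-byte offset + 2-byte flags, packed */
        int32_t comp_len, uncomp_len, base_rev, link_rev, p1, p2;
        unsigned char node[20]; /* only 20 of the 32 nodeid bytes are used */
};

static void decode_entry(const unsigned char *data, int rev,
                         struct revlog_entry *e)
{
        e->offset_flags = read_be32(data + 4);
        if (rev == 0)
                e->offset_flags &= 0xFFFF; /* first entry doubles as version header */
        else
                e->offset_flags |= (uint64_t)read_be32(data) << 32;
        e->comp_len = (int32_t)read_be32(data + 8);
        e->uncomp_len = (int32_t)read_be32(data + 12);
        e->base_rev = (int32_t)read_be32(data + 16);
        e->link_rev = (int32_t)read_be32(data + 20);
        e->p1 = (int32_t)read_be32(data + 24);
        e->p2 = (int32_t)read_be32(data + 28);
        memcpy(e->node, data + 32, 20);
}

int main(void)
{
        unsigned char entry[64] = {0}; /* an all-zero entry, just to exercise the decode */
        struct revlog_entry e;
        decode_entry(entry, 1, &e);
        return e.comp_len; /* 0 for the all-zero entry */
}
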
380 379
381 380 /*
382 381 * Return the 20-byte SHA of the node corresponding to the given rev.
383 382 */
384 383 static const char *index_node(indexObject *self, Py_ssize_t pos)
385 384 {
386 385 Py_ssize_t length = index_length(self);
387 386 const char *data;
388 387
389 388 if (pos == nullrev)
390 389 return nullid;
391 390
392 391 if (pos >= length)
393 392 return NULL;
394 393
395 394 if (pos >= self->length) {
396 395 PyObject *tuple, *str;
397 396 tuple = PyList_GET_ITEM(self->added, pos - self->length);
398 397 str = PyTuple_GetItem(tuple, 7);
399 398 return str ? PyBytes_AS_STRING(str) : NULL;
400 399 }
401 400
402 401 data = index_deref(self, pos);
403 402 return data ? data + 32 : NULL;
404 403 }
405 404
406 405 /*
407 406 * Return the 20-byte SHA of the node corresponding to the given rev. The
408 407 * rev is assumed to exist. If not, an exception is set.
409 408 */
410 409 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
411 410 {
412 411 const char *node = index_node(self, pos);
413 412 if (node == NULL) {
414 413 PyErr_Format(PyExc_IndexError, "could not access rev %d",
415 414 (int)pos);
416 415 }
417 416 return node;
418 417 }
419 418
420 419 static int nt_insert(nodetree *self, const char *node, int rev);
421 420
422 421 static int node_check(PyObject *obj, char **node)
423 422 {
424 423 Py_ssize_t nodelen;
425 424 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
426 425 return -1;
427 426 if (nodelen == 20)
428 427 return 0;
429 428 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
430 429 return -1;
431 430 }
432 431
433 432 static PyObject *index_append(indexObject *self, PyObject *obj)
434 433 {
435 434 char *node;
436 435 Py_ssize_t len;
437 436
438 437 if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
439 438 PyErr_SetString(PyExc_TypeError, "8-tuple required");
440 439 return NULL;
441 440 }
442 441
443 442 if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
444 443 return NULL;
445 444
446 445 len = index_length(self);
447 446
448 447 if (self->added == NULL) {
449 448 self->added = PyList_New(0);
450 449 if (self->added == NULL)
451 450 return NULL;
452 451 }
453 452
454 453 if (PyList_Append(self->added, obj) == -1)
455 454 return NULL;
456 455
457 456 if (self->ntinitialized)
458 457 nt_insert(&self->nt, node, (int)len);
459 458
460 459 Py_CLEAR(self->headrevs);
461 460 Py_RETURN_NONE;
462 461 }
463 462
464 463 static PyObject *index_stats(indexObject *self)
465 464 {
466 465 PyObject *obj = PyDict_New();
467 466 PyObject *s = NULL;
468 467 PyObject *t = NULL;
469 468
470 469 if (obj == NULL)
471 470 return NULL;
472 471
473 472 #define istat(__n, __d) \
474 473 do { \
475 474 s = PyBytes_FromString(__d); \
476 475 t = PyInt_FromSsize_t(self->__n); \
477 476 if (!s || !t) \
478 477 goto bail; \
479 478 if (PyDict_SetItem(obj, s, t) == -1) \
480 479 goto bail; \
481 480 Py_CLEAR(s); \
482 481 Py_CLEAR(t); \
483 482 } while (0)
484 483
485 484 if (self->added) {
486 485 Py_ssize_t len = PyList_GET_SIZE(self->added);
487 486 s = PyBytes_FromString("index entries added");
488 487 t = PyInt_FromSsize_t(len);
489 488 if (!s || !t)
490 489 goto bail;
491 490 if (PyDict_SetItem(obj, s, t) == -1)
492 491 goto bail;
493 492 Py_CLEAR(s);
494 493 Py_CLEAR(t);
495 494 }
496 495
497 496 if (self->raw_length != self->length)
498 497 istat(raw_length, "revs on disk");
499 498 istat(length, "revs in memory");
500 499 istat(ntlookups, "node trie lookups");
501 500 istat(ntmisses, "node trie misses");
502 501 istat(ntrev, "node trie last rev scanned");
503 502 if (self->ntinitialized) {
504 503 istat(nt.capacity, "node trie capacity");
505 504 istat(nt.depth, "node trie depth");
506 505 istat(nt.length, "node trie count");
507 506 istat(nt.splits, "node trie splits");
508 507 }
509 508
510 509 #undef istat
511 510
512 511 return obj;
513 512
514 513 bail:
515 514 Py_XDECREF(obj);
516 515 Py_XDECREF(s);
517 516 Py_XDECREF(t);
518 517 return NULL;
519 518 }
520 519
521 520 /*
522 521 * When we cache a list, we want to be sure the caller can't mutate
523 522 * the cached copy.
524 523 */
525 524 static PyObject *list_copy(PyObject *list)
526 525 {
527 526 Py_ssize_t len = PyList_GET_SIZE(list);
528 527 PyObject *newlist = PyList_New(len);
529 528 Py_ssize_t i;
530 529
531 530 if (newlist == NULL)
532 531 return NULL;
533 532
534 533 for (i = 0; i < len; i++) {
535 534 PyObject *obj = PyList_GET_ITEM(list, i);
536 535 Py_INCREF(obj);
537 536 PyList_SET_ITEM(newlist, i, obj);
538 537 }
539 538
540 539 return newlist;
541 540 }
542 541
543 542 static int check_filter(PyObject *filter, Py_ssize_t arg)
544 543 {
545 544 if (filter) {
546 545 PyObject *arglist, *result;
547 546 int isfiltered;
548 547
549 548 arglist = Py_BuildValue("(n)", arg);
550 549 if (!arglist) {
551 550 return -1;
552 551 }
553 552
554 553 result = PyEval_CallObject(filter, arglist);
555 554 Py_DECREF(arglist);
556 555 if (!result) {
557 556 return -1;
558 557 }
559 558
560 559 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
561 560 * same as this function, so we can just return it directly.*/
562 561 isfiltered = PyObject_IsTrue(result);
563 562 Py_DECREF(result);
564 563 return isfiltered;
565 564 } else {
566 565 return 0;
567 566 }
568 567 }
569 568
570 569 static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
571 570 Py_ssize_t marker, char *phases)
572 571 {
573 572 PyObject *iter = NULL;
574 573 PyObject *iter_item = NULL;
575 574 Py_ssize_t min_idx = index_length(self) + 2;
576 575 long iter_item_long;
577 576
578 577 if (PyList_GET_SIZE(list) != 0) {
579 578 iter = PyObject_GetIter(list);
580 579 if (iter == NULL)
581 580 return -2;
582 581 while ((iter_item = PyIter_Next(iter))) {
583 582 if (!pylong_to_long(iter_item, &iter_item_long)) {
584 583 Py_DECREF(iter_item);
585 584 return -2;
586 585 }
587 586 Py_DECREF(iter_item);
588 587 if (iter_item_long < min_idx)
589 588 min_idx = iter_item_long;
590 589 phases[iter_item_long] = (char)marker;
591 590 }
592 591 Py_DECREF(iter);
593 592 }
594 593
595 594 return min_idx;
596 595 }
597 596
598 597 static inline void set_phase_from_parents(char *phases, int parent_1,
599 598 int parent_2, Py_ssize_t i)
600 599 {
601 600 if (parent_1 >= 0 && phases[parent_1] > phases[i])
602 601 phases[i] = phases[parent_1];
603 602 if (parent_2 >= 0 && phases[parent_2] > phases[i])
604 603 phases[i] = phases[parent_2];
605 604 }
606 605
607 606 static PyObject *reachableroots2(indexObject *self, PyObject *args)
608 607 {
609 608
610 609 /* Input */
611 610 long minroot;
612 611 PyObject *includepatharg = NULL;
613 612 int includepath = 0;
614 613 /* heads and roots are lists */
615 614 PyObject *heads = NULL;
616 615 PyObject *roots = NULL;
617 616 PyObject *reachable = NULL;
618 617
619 618 PyObject *val;
620 619 Py_ssize_t len = index_length(self);
621 620 long revnum;
622 621 Py_ssize_t k;
623 622 Py_ssize_t i;
624 623 Py_ssize_t l;
625 624 int r;
626 625 int parents[2];
627 626
628 627 /* Internal data structure:
629 628 * tovisit: array of length len+1 (all revs + nullrev), filled up to
630 629 * lentovisit
631 630 *
632 631 * revstates: array of length len+1 (all revs + nullrev) */
633 632 int *tovisit = NULL;
634 633 long lentovisit = 0;
635 634 enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
636 635 char *revstates = NULL;
637 636
638 637 /* Get arguments */
639 638 if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
640 639 &PyList_Type, &roots, &PyBool_Type,
641 640 &includepatharg))
642 641 goto bail;
643 642
644 643 if (includepatharg == Py_True)
645 644 includepath = 1;
646 645
647 646 /* Initialize return set */
648 647 reachable = PyList_New(0);
649 648 if (reachable == NULL)
650 649 goto bail;
651 650
652 651 /* Initialize internal datastructures */
653 652 tovisit = (int *)malloc((len + 1) * sizeof(int));
654 653 if (tovisit == NULL) {
655 654 PyErr_NoMemory();
656 655 goto bail;
657 656 }
658 657
659 658 revstates = (char *)calloc(len + 1, 1);
660 659 if (revstates == NULL) {
661 660 PyErr_NoMemory();
662 661 goto bail;
663 662 }
664 663
665 664 l = PyList_GET_SIZE(roots);
666 665 for (i = 0; i < l; i++) {
667 666 revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
668 667 if (revnum == -1 && PyErr_Occurred())
669 668 goto bail;
670 669 /* If root is out of range, e.g. wdir(), it must be unreachable
671 670 * from heads. So we can just ignore it. */
672 671 if (revnum + 1 < 0 || revnum + 1 >= len + 1)
673 672 continue;
674 673 revstates[revnum + 1] |= RS_ROOT;
675 674 }
676 675
677 676 /* Populate tovisit with all the heads */
678 677 l = PyList_GET_SIZE(heads);
679 678 for (i = 0; i < l; i++) {
680 679 revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
681 680 if (revnum == -1 && PyErr_Occurred())
682 681 goto bail;
683 682 if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
684 683 PyErr_SetString(PyExc_IndexError, "head out of range");
685 684 goto bail;
686 685 }
687 686 if (!(revstates[revnum + 1] & RS_SEEN)) {
688 687 tovisit[lentovisit++] = (int)revnum;
689 688 revstates[revnum + 1] |= RS_SEEN;
690 689 }
691 690 }
692 691
693 692 /* Visit the tovisit list and find the reachable roots */
694 693 k = 0;
695 694 while (k < lentovisit) {
696 695 /* Add the node to reachable if it is a root*/
697 696 revnum = tovisit[k++];
698 697 if (revstates[revnum + 1] & RS_ROOT) {
699 698 revstates[revnum + 1] |= RS_REACHABLE;
700 699 val = PyInt_FromLong(revnum);
701 700 if (val == NULL)
702 701 goto bail;
703 702 r = PyList_Append(reachable, val);
704 703 Py_DECREF(val);
705 704 if (r < 0)
706 705 goto bail;
707 706 if (includepath == 0)
708 707 continue;
709 708 }
710 709
711 710 /* Add its parents to the list of nodes to visit */
712 711 if (revnum == nullrev)
713 712 continue;
714 713 r = index_get_parents(self, revnum, parents, (int)len - 1);
715 714 if (r < 0)
716 715 goto bail;
717 716 for (i = 0; i < 2; i++) {
718 717 if (!(revstates[parents[i] + 1] & RS_SEEN) &&
719 718 parents[i] >= minroot) {
720 719 tovisit[lentovisit++] = parents[i];
721 720 revstates[parents[i] + 1] |= RS_SEEN;
722 721 }
723 722 }
724 723 }
725 724
726 725 /* Find all the nodes in between the roots we found and the heads
727 726 * and add them to the reachable set */
728 727 if (includepath == 1) {
729 728 long minidx = minroot;
730 729 if (minidx < 0)
731 730 minidx = 0;
732 731 for (i = minidx; i < len; i++) {
733 732 if (!(revstates[i + 1] & RS_SEEN))
734 733 continue;
735 734 r = index_get_parents(self, i, parents, (int)len - 1);
736 735 /* Corrupted index file, error is set from
737 736 * index_get_parents */
738 737 if (r < 0)
739 738 goto bail;
740 739 if (((revstates[parents[0] + 1] |
741 740 revstates[parents[1] + 1]) &
742 741 RS_REACHABLE) &&
743 742 !(revstates[i + 1] & RS_REACHABLE)) {
744 743 revstates[i + 1] |= RS_REACHABLE;
745 744 val = PyInt_FromSsize_t(i);
746 745 if (val == NULL)
747 746 goto bail;
748 747 r = PyList_Append(reachable, val);
749 748 Py_DECREF(val);
750 749 if (r < 0)
751 750 goto bail;
752 751 }
753 752 }
754 753 }
755 754
756 755 free(revstates);
757 756 free(tovisit);
758 757 return reachable;
759 758 bail:
760 759 Py_XDECREF(reachable);
761 760 free(revstates);
762 761 free(tovisit);
763 762 return NULL;
764 763 }
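
A small standalone illustration (not from parsers.c) of the `+ 1` indexing convention used by reachableroots2(): revstates and tovisit are allocated with len + 1 slots so that nullrev (-1) maps to index 0 and every real revision maps to rev + 1.

#include <assert.h>
#include <stdlib.h>

enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };

int main(void)
{
        long len = 5;                         /* pretend the index has 5 revisions */
        char *revstates = calloc(len + 1, 1); /* slot 0 is reserved for nullrev */
        if (revstates == NULL)
                return 1;

        long nullrev = -1;
        revstates[nullrev + 1] |= RS_SEEN; /* nullrev maps to index 0 */
        revstates[3 + 1] |= RS_ROOT;       /* rev 3 maps to index 4 */

        assert(revstates[0] & RS_SEEN);
        assert(revstates[4] & RS_ROOT);
        assert(!(revstates[4] & RS_REACHABLE));

        free(revstates);
        return 0;
}
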
765 764
766 765 static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
767 766 {
768 767 PyObject *roots = Py_None;
769 768 PyObject *ret = NULL;
770 769 PyObject *phasessize = NULL;
771 770 PyObject *phaseroots = NULL;
772 771 PyObject *phaseset = NULL;
773 772 PyObject *phasessetlist = NULL;
774 773 PyObject *rev = NULL;
775 774 Py_ssize_t len = index_length(self);
776 775 Py_ssize_t numphase = 0;
777 776 Py_ssize_t minrevallphases = 0;
778 777 Py_ssize_t minrevphase = 0;
779 778 Py_ssize_t i = 0;
780 779 char *phases = NULL;
781 780 long phase;
782 781
783 782 if (!PyArg_ParseTuple(args, "O", &roots))
784 783 goto done;
785 784 if (roots == NULL || !PyList_Check(roots)) {
786 785 PyErr_SetString(PyExc_TypeError, "roots must be a list");
787 786 goto done;
788 787 }
789 788
790 789 phases = calloc(
791 790 len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
792 791 if (phases == NULL) {
793 792 PyErr_NoMemory();
794 793 goto done;
795 794 }
796 795 /* Put the phase information of all the roots in phases */
797 796 numphase = PyList_GET_SIZE(roots) + 1;
798 797 minrevallphases = len + 1;
799 798 phasessetlist = PyList_New(numphase);
800 799 if (phasessetlist == NULL)
801 800 goto done;
802 801
803 802 PyList_SET_ITEM(phasessetlist, 0, Py_None);
804 803 Py_INCREF(Py_None);
805 804
806 805 for (i = 0; i < numphase - 1; i++) {
807 806 phaseroots = PyList_GET_ITEM(roots, i);
808 807 phaseset = PySet_New(NULL);
809 808 if (phaseset == NULL)
810 809 goto release;
811 810 PyList_SET_ITEM(phasessetlist, i + 1, phaseset);
812 811 if (!PyList_Check(phaseroots)) {
813 812 PyErr_SetString(PyExc_TypeError,
814 813 "roots item must be a list");
815 814 goto release;
816 815 }
817 816 minrevphase =
818 817 add_roots_get_min(self, phaseroots, i + 1, phases);
819 818 if (minrevphase == -2) /* Error from add_roots_get_min */
820 819 goto release;
821 820 minrevallphases = MIN(minrevallphases, minrevphase);
822 821 }
823 822 /* Propagate the phase information from the roots to the revs */
824 823 if (minrevallphases != -1) {
825 824 int parents[2];
826 825 for (i = minrevallphases; i < len; i++) {
827 826 if (index_get_parents(self, i, parents, (int)len - 1) <
828 827 0)
829 828 goto release;
830 829 set_phase_from_parents(phases, parents[0], parents[1],
831 830 i);
832 831 }
833 832 }
834 833 /* Transform phase list to a python list */
835 834 phasessize = PyInt_FromSsize_t(len);
836 835 if (phasessize == NULL)
837 836 goto release;
838 837 for (i = 0; i < len; i++) {
839 838 phase = phases[i];
840 839 /* We only store the sets of phase for non public phase, the
841 840 * public phase is computed as a difference */
842 841 if (phase != 0) {
843 842 phaseset = PyList_GET_ITEM(phasessetlist, phase);
844 843 rev = PyInt_FromSsize_t(i);
845 844 if (rev == NULL)
846 845 goto release;
847 846 PySet_Add(phaseset, rev);
848 847 Py_XDECREF(rev);
849 848 }
850 849 }
851 850 ret = PyTuple_Pack(2, phasessize, phasessetlist);
852 851
853 852 release:
854 853 Py_XDECREF(phasessize);
855 854 Py_XDECREF(phasessetlist);
856 855 done:
857 856 free(phases);
858 857 return ret;
859 858 }
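
A toy standalone walk-through (invented parent table, not from parsers.c) of the propagation loop above: after the roots seed the phases array, each revision takes the highest phase among its parents, exactly as set_phase_from_parents() does, so a draft root makes all of its descendants at least draft.

#include <assert.h>

/* max(parent phases, own phase), mirroring set_phase_from_parents() */
static void inherit_phase(char *phases, int p1, int p2, int i)
{
        if (p1 >= 0 && phases[p1] > phases[i])
                phases[i] = phases[p1];
        if (p2 >= 0 && phases[p2] > phases[i])
                phases[i] = phases[p2];
}

int main(void)
{
        /* toy history: rev 0 <- 1 <- 2, and rev 3 merges 1 and 2 */
        int parents[4][2] = {{-1, -1}, {0, -1}, {1, -1}, {1, 2}};
        char phases[4] = {0, 0, 0, 0}; /* 0: public, 1: draft, 2: secret */

        phases[1] = 1; /* rev 1 is a draft root */
        for (int i = 0; i < 4; i++)
                inherit_phase(phases, parents[i][0], parents[i][1], i);

        assert(phases[0] == 0);                   /* ancestor of the root stays public */
        assert(phases[2] == 1 && phases[3] == 1); /* descendants become draft */
        return 0;
}
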
860 859
861 860 static PyObject *index_headrevs(indexObject *self, PyObject *args)
862 861 {
863 862 Py_ssize_t i, j, len;
864 863 char *nothead = NULL;
865 864 PyObject *heads = NULL;
866 865 PyObject *filter = NULL;
867 866 PyObject *filteredrevs = Py_None;
868 867
869 868 if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
870 869 return NULL;
871 870 }
872 871
873 872 if (self->headrevs && filteredrevs == self->filteredrevs)
874 873 return list_copy(self->headrevs);
875 874
876 875 Py_DECREF(self->filteredrevs);
877 876 self->filteredrevs = filteredrevs;
878 877 Py_INCREF(filteredrevs);
879 878
880 879 if (filteredrevs != Py_None) {
881 880 filter = PyObject_GetAttrString(filteredrevs, "__contains__");
882 881 if (!filter) {
883 882 PyErr_SetString(
884 883 PyExc_TypeError,
885 884 "filteredrevs has no attribute __contains__");
886 885 goto bail;
887 886 }
888 887 }
889 888
890 889 len = index_length(self);
891 890 heads = PyList_New(0);
892 891 if (heads == NULL)
893 892 goto bail;
894 893 if (len == 0) {
895 894 PyObject *nullid = PyInt_FromLong(-1);
896 895 if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
897 896 Py_XDECREF(nullid);
898 897 goto bail;
899 898 }
900 899 goto done;
901 900 }
902 901
903 902 nothead = calloc(len, 1);
904 903 if (nothead == NULL) {
905 904 PyErr_NoMemory();
906 905 goto bail;
907 906 }
908 907
909 908 for (i = len - 1; i >= 0; i--) {
910 909 int isfiltered;
911 910 int parents[2];
912 911
913 912 /* If nothead[i] == 1, it means we've seen an unfiltered child
914 913 * of this node already, and therefore this node is not
915 914 * filtered. So we can skip the expensive check_filter step.
916 915 */
917 916 if (nothead[i] != 1) {
918 917 isfiltered = check_filter(filter, i);
919 918 if (isfiltered == -1) {
920 919 PyErr_SetString(PyExc_TypeError,
921 920 "unable to check filter");
922 921 goto bail;
923 922 }
924 923
925 924 if (isfiltered) {
926 925 nothead[i] = 1;
927 926 continue;
928 927 }
929 928 }
930 929
931 930 if (index_get_parents(self, i, parents, (int)len - 1) < 0)
932 931 goto bail;
933 932 for (j = 0; j < 2; j++) {
934 933 if (parents[j] >= 0)
935 934 nothead[parents[j]] = 1;
936 935 }
937 936 }
938 937
939 938 for (i = 0; i < len; i++) {
940 939 PyObject *head;
941 940
942 941 if (nothead[i])
943 942 continue;
944 943 head = PyInt_FromSsize_t(i);
945 944 if (head == NULL || PyList_Append(heads, head) == -1) {
946 945 Py_XDECREF(head);
947 946 goto bail;
948 947 }
949 948 }
950 949
951 950 done:
952 951 self->headrevs = heads;
953 952 Py_XDECREF(filter);
954 953 free(nothead);
955 954 return list_copy(self->headrevs);
956 955 bail:
957 956 Py_XDECREF(filter);
958 957 Py_XDECREF(heads);
959 958 free(nothead);
960 959 return NULL;
961 960 }
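
A toy standalone version (invented parent table, not from parsers.c) of the nothead[] pass above: a revision is a head exactly when no other revision lists it as a parent, which is what the reverse sweep records.

#include <assert.h>
#include <stdio.h>

int main(void)
{
        /* toy DAG: 0 <- 1, 0 <- 2, 2 <- 3; the heads should be 1 and 3 */
        int parents[4][2] = {{-1, -1}, {0, -1}, {0, -1}, {2, -1}};
        char nothead[4] = {0};

        for (int i = 3; i >= 0; i--)
                for (int j = 0; j < 2; j++)
                        if (parents[i][j] >= 0)
                                nothead[parents[i][j]] = 1;

        for (int i = 0; i < 4; i++)
                if (!nothead[i])
                        printf("head: %d\n", i); /* prints 1 and 3 */
        assert(nothead[0] && !nothead[1] && nothead[2] && !nothead[3]);
        return 0;
}
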
962 961
963 962 /**
964 963 * Obtain the base revision index entry.
965 964 *
966 965 * Callers must ensure that rev >= 0 or illegal memory access may occur.
967 966 */
968 967 static inline int index_baserev(indexObject *self, int rev)
969 968 {
970 969 const char *data;
971 970 int result;
972 971
973 972 if (rev >= self->length) {
974 973 PyObject *tuple =
975 974 PyList_GET_ITEM(self->added, rev - self->length);
976 975 long ret;
977 976 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 3), &ret)) {
978 977 return -2;
979 978 }
980 979 result = (int)ret;
981 980 } else {
982 981 data = index_deref(self, rev);
983 982 if (data == NULL) {
984 983 return -2;
985 984 }
986 985
987 986 result = getbe32(data + 16);
988 987 }
989 988 if (result > rev) {
990 989 PyErr_Format(
991 990 PyExc_ValueError,
992 991 "corrupted revlog, revision base above revision: %d, %d",
993 992 rev, result);
994 993 return -2;
995 994 }
996 995 if (result < -1) {
997 996 PyErr_Format(
998 997 PyExc_ValueError,
999 998 "corrupted revlog, revision base out of range: %d, %d", rev,
1000 999 result);
1001 1000 return -2;
1002 1001 }
1003 1002 return result;
1004 1003 }
1005 1004
1006 1005 /**
1007 1006 * Find if a revision is a snapshot or not
1008 1007 *
1009 1008 * Only relevant for sparse-revlog case.
1010 1009 * Callers must ensure that rev is in a valid range.
1011 1010 */
1012 1011 static int index_issnapshotrev(indexObject *self, Py_ssize_t rev)
1013 1012 {
1014 1013 int ps[2];
1015 1014 Py_ssize_t base;
1016 1015 while (rev >= 0) {
1017 1016 base = (Py_ssize_t)index_baserev(self, rev);
1018 1017 if (base == rev) {
1019 1018 base = -1;
1020 1019 }
1021 1020 if (base == -2) {
1022 1021 assert(PyErr_Occurred());
1023 1022 return -1;
1024 1023 }
1025 1024 if (base == -1) {
1026 1025 return 1;
1027 1026 }
1028 1027 if (index_get_parents(self, rev, ps, (int)rev) < 0) {
1029 1028 assert(PyErr_Occurred());
1030 1029 return -1;
1031 1030 };
1032 1031 if (base == ps[0] || base == ps[1]) {
1033 1032 return 0;
1034 1033 }
1035 1034 rev = base;
1036 1035 }
1037 1036 return rev == -1;
1038 1037 }
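
A hedged toy rendering (hard-coded base and parent tables, not from parsers.c) of the walk above: a revision counts as a snapshot when following its delta-base chain reaches a full text (base == -1, or base == rev) without ever hitting a level that deltas directly against one of its own parents.

#include <assert.h>

int main(void)
{
        /* toy tables: base[i] is the delta base of rev i, parents[i] its parents */
        int base[4] = {-1, 0, 1, 3}; /* rev 3 is its own base => stored as full text */
        int parents[4][2] = {{-1, -1}, {0, -1}, {1, -1}, {2, -1}};
        int issnap[4];

        for (int rev = 0; rev < 4; rev++) {
                int r = rev, snap = -1;
                while (r >= 0) {
                        int b = base[r];
                        if (b == r)
                                b = -1;
                        if (b == -1) {
                                snap = 1; /* reached a full snapshot */
                                break;
                        }
                        if (b == parents[r][0] || b == parents[r][1]) {
                                snap = 0; /* delta against a parent: not a snapshot */
                                break;
                        }
                        r = b;
                }
                issnap[rev] = snap;
        }

        assert(issnap[0] == 1); /* full text */
        assert(issnap[1] == 0); /* delta against its parent 0 */
        assert(issnap[3] == 1); /* base == rev means stored as a snapshot */
        return 0;
}
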
1039 1038
1040 1039 static PyObject *index_issnapshot(indexObject *self, PyObject *value)
1041 1040 {
1042 1041 long rev;
1043 1042 int issnap;
1044 1043 Py_ssize_t length = index_length(self);
1045 1044
1046 1045 if (!pylong_to_long(value, &rev)) {
1047 1046 return NULL;
1048 1047 }
1049 1048 if (rev < -1 || rev >= length) {
1050 1049 PyErr_Format(PyExc_ValueError, "revlog index out of range: %ld",
1051 1050 rev);
1052 1051 return NULL;
1053 1052 };
1054 1053 issnap = index_issnapshotrev(self, (Py_ssize_t)rev);
1055 1054 if (issnap < 0) {
1056 1055 return NULL;
1057 1056 };
1058 1057 return PyBool_FromLong((long)issnap);
1059 1058 }
1060 1059
1061 1060 static PyObject *index_findsnapshots(indexObject *self, PyObject *args)
1062 1061 {
1063 1062 Py_ssize_t start_rev;
1064 1063 PyObject *cache;
1065 1064 Py_ssize_t base;
1066 1065 Py_ssize_t rev;
1067 1066 PyObject *key = NULL;
1068 1067 PyObject *value = NULL;
1069 1068 const Py_ssize_t length = index_length(self);
1070 1069 if (!PyArg_ParseTuple(args, "O!n", &PyDict_Type, &cache, &start_rev)) {
1071 1070 return NULL;
1072 1071 }
1073 1072 for (rev = start_rev; rev < length; rev++) {
1074 1073 int issnap;
1075 1074 PyObject *allvalues = NULL;
1076 1075 issnap = index_issnapshotrev(self, rev);
1077 1076 if (issnap < 0) {
1078 1077 goto bail;
1079 1078 }
1080 1079 if (issnap == 0) {
1081 1080 continue;
1082 1081 }
1083 1082 base = (Py_ssize_t)index_baserev(self, rev);
1084 1083 if (base == rev) {
1085 1084 base = -1;
1086 1085 }
1087 1086 if (base == -2) {
1088 1087 assert(PyErr_Occurred());
1089 1088 goto bail;
1090 1089 }
1091 1090 key = PyInt_FromSsize_t(base);
1092 1091 allvalues = PyDict_GetItem(cache, key);
1093 1092 if (allvalues == NULL && PyErr_Occurred()) {
1094 1093 goto bail;
1095 1094 }
1096 1095 if (allvalues == NULL) {
1097 1096 int r;
1098 1097 allvalues = PyList_New(0);
1099 1098 if (!allvalues) {
1100 1099 goto bail;
1101 1100 }
1102 1101 r = PyDict_SetItem(cache, key, allvalues);
1103 1102 Py_DECREF(allvalues);
1104 1103 if (r < 0) {
1105 1104 goto bail;
1106 1105 }
1107 1106 }
1108 1107 value = PyInt_FromSsize_t(rev);
1109 1108 if (PyList_Append(allvalues, value)) {
1110 1109 goto bail;
1111 1110 }
1112 1111 Py_CLEAR(key);
1113 1112 Py_CLEAR(value);
1114 1113 }
1115 1114 Py_RETURN_NONE;
1116 1115 bail:
1117 1116 Py_XDECREF(key);
1118 1117 Py_XDECREF(value);
1119 1118 return NULL;
1120 1119 }
1121 1120
1122 1121 static PyObject *index_deltachain(indexObject *self, PyObject *args)
1123 1122 {
1124 1123 int rev, generaldelta;
1125 1124 PyObject *stoparg;
1126 1125 int stoprev, iterrev, baserev = -1;
1127 1126 int stopped;
1128 1127 PyObject *chain = NULL, *result = NULL;
1129 1128 const Py_ssize_t length = index_length(self);
1130 1129
1131 1130 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
1132 1131 return NULL;
1133 1132 }
1134 1133
1135 1134 if (PyInt_Check(stoparg)) {
1136 1135 stoprev = (int)PyInt_AsLong(stoparg);
1137 1136 if (stoprev == -1 && PyErr_Occurred()) {
1138 1137 return NULL;
1139 1138 }
1140 1139 } else if (stoparg == Py_None) {
1141 1140 stoprev = -2;
1142 1141 } else {
1143 1142 PyErr_SetString(PyExc_ValueError,
1144 1143 "stoprev must be integer or None");
1145 1144 return NULL;
1146 1145 }
1147 1146
1148 1147 if (rev < 0 || rev >= length) {
1149 1148 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1150 1149 return NULL;
1151 1150 }
1152 1151
1153 1152 chain = PyList_New(0);
1154 1153 if (chain == NULL) {
1155 1154 return NULL;
1156 1155 }
1157 1156
1158 1157 baserev = index_baserev(self, rev);
1159 1158
1160 1159 /* This should never happen. */
1161 1160 if (baserev <= -2) {
1162 1161 /* Error should be set by index_deref() */
1163 1162 assert(PyErr_Occurred());
1164 1163 goto bail;
1165 1164 }
1166 1165
1167 1166 iterrev = rev;
1168 1167
1169 1168 while (iterrev != baserev && iterrev != stoprev) {
1170 1169 PyObject *value = PyInt_FromLong(iterrev);
1171 1170 if (value == NULL) {
1172 1171 goto bail;
1173 1172 }
1174 1173 if (PyList_Append(chain, value)) {
1175 1174 Py_DECREF(value);
1176 1175 goto bail;
1177 1176 }
1178 1177 Py_DECREF(value);
1179 1178
1180 1179 if (generaldelta) {
1181 1180 iterrev = baserev;
1182 1181 } else {
1183 1182 iterrev--;
1184 1183 }
1185 1184
1186 1185 if (iterrev < 0) {
1187 1186 break;
1188 1187 }
1189 1188
1190 1189 if (iterrev >= length) {
1191 1190 PyErr_SetString(PyExc_IndexError,
1192 1191 "revision outside index");
1193 1192 return NULL;
1194 1193 }
1195 1194
1196 1195 baserev = index_baserev(self, iterrev);
1197 1196
1198 1197 /* This should never happen. */
1199 1198 if (baserev <= -2) {
1200 1199 /* Error should be set by index_deref() */
1201 1200 assert(PyErr_Occurred());
1202 1201 goto bail;
1203 1202 }
1204 1203 }
1205 1204
1206 1205 if (iterrev == stoprev) {
1207 1206 stopped = 1;
1208 1207 } else {
1209 1208 PyObject *value = PyInt_FromLong(iterrev);
1210 1209 if (value == NULL) {
1211 1210 goto bail;
1212 1211 }
1213 1212 if (PyList_Append(chain, value)) {
1214 1213 Py_DECREF(value);
1215 1214 goto bail;
1216 1215 }
1217 1216 Py_DECREF(value);
1218 1217
1219 1218 stopped = 0;
1220 1219 }
1221 1220
1222 1221 if (PyList_Reverse(chain)) {
1223 1222 goto bail;
1224 1223 }
1225 1224
1226 1225 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
1227 1226 Py_DECREF(chain);
1228 1227 return result;
1229 1228
1230 1229 bail:
1231 1230 Py_DECREF(chain);
1232 1231 return NULL;
1233 1232 }
1234 1233
1235 1234 static inline int64_t
1236 1235 index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
1237 1236 {
1238 1237 int64_t start_offset;
1239 1238 int64_t end_offset;
1240 1239 int end_size;
1241 1240 start_offset = index_get_start(self, start_rev);
1242 1241 if (start_offset < 0) {
1243 1242 return -1;
1244 1243 }
1245 1244 end_offset = index_get_start(self, end_rev);
1246 1245 if (end_offset < 0) {
1247 1246 return -1;
1248 1247 }
1249 1248 end_size = index_get_length(self, end_rev);
1250 1249 if (end_size < 0) {
1251 1250 return -1;
1252 1251 }
1253 1252 if (end_offset < start_offset) {
1254 1253 PyErr_Format(PyExc_ValueError,
1255 1254 "corrupted revlog index: inconsistent offset "
1256 1255 "between revisions (%zd) and (%zd)",
1257 1256 start_rev, end_rev);
1258 1257 return -1;
1259 1258 }
1260 1259 return (end_offset - start_offset) + (int64_t)end_size;
1261 1260 }
1262 1261
1263 1262 /* returns endidx so that revs[startidx:endidx] has no empty trailing revs */
1264 1263 static Py_ssize_t trim_endidx(indexObject *self, const Py_ssize_t *revs,
1265 1264 Py_ssize_t startidx, Py_ssize_t endidx)
1266 1265 {
1267 1266 int length;
1268 1267 while (endidx > 1 && endidx > startidx) {
1269 1268 length = index_get_length(self, revs[endidx - 1]);
1270 1269 if (length < 0) {
1271 1270 return -1;
1272 1271 }
1273 1272 if (length != 0) {
1274 1273 break;
1275 1274 }
1276 1275 endidx -= 1;
1277 1276 }
1278 1277 return endidx;
1279 1278 }
1280 1279
1281 1280 struct Gap {
1282 1281 int64_t size;
1283 1282 Py_ssize_t idx;
1284 1283 };
1285 1284
1286 1285 static int gap_compare(const void *left, const void *right)
1287 1286 {
1288 1287 const struct Gap *l_left = ((const struct Gap *)left);
1289 1288 const struct Gap *l_right = ((const struct Gap *)right);
1290 1289 if (l_left->size < l_right->size) {
1291 1290 return -1;
1292 1291 } else if (l_left->size > l_right->size) {
1293 1292 return 1;
1294 1293 }
1295 1294 return 0;
1296 1295 }
1297 1296 static int Py_ssize_t_compare(const void *left, const void *right)
1298 1297 {
1299 1298 const Py_ssize_t l_left = *(const Py_ssize_t *)left;
1300 1299 const Py_ssize_t l_right = *(const Py_ssize_t *)right;
1301 1300 if (l_left < l_right) {
1302 1301 return -1;
1303 1302 } else if (l_left > l_right) {
1304 1303 return 1;
1305 1304 }
1306 1305 return 0;
1307 1306 }
1308 1307
1309 1308 static PyObject *index_slicechunktodensity(indexObject *self, PyObject *args)
1310 1309 {
1311 1310 /* method arguments */
1312 1311 PyObject *list_revs = NULL; /* revisions in the chain */
1313 1312 double targetdensity = 0; /* min density to achieve */
1314 1313 Py_ssize_t mingapsize = 0; /* threshold to ignore gaps */
1315 1314
1316 1315 /* other core variables */
1317 1316 Py_ssize_t idxlen = index_length(self);
1318 1317 Py_ssize_t i; /* used for various iteration */
1319 1318 PyObject *result = NULL; /* the final return of the function */
1320 1319
1321 1320 /* generic information about the delta chain being sliced */
1322 1321 Py_ssize_t num_revs = 0; /* size of the full delta chain */
1323 1322 Py_ssize_t *revs = NULL; /* native array of revision in the chain */
1324 1323 int64_t chainpayload = 0; /* sum of all delta in the chain */
1325 1324 int64_t deltachainspan = 0; /* distance from first byte to last byte */
1326 1325
1327 1326 /* variable used for slicing the delta chain */
1328 1327 int64_t readdata = 0; /* amount of data currently planned to be read */
1329 1328 double density = 0; /* ratio of payload data to data read */
1330 1329 int64_t previous_end;
1331 1330 struct Gap *gaps = NULL; /* array of notable gap in the chain */
1332 1331 Py_ssize_t num_gaps =
1333 1332 0; /* total number of notable gap recorded so far */
1334 1333 Py_ssize_t *selected_indices = NULL; /* indices of gap skipped over */
1335 1334 Py_ssize_t num_selected = 0; /* number of gaps skipped */
1336 1335 PyObject *chunk = NULL; /* individual slice */
1337 1336 PyObject *allchunks = NULL; /* all slices */
1338 1337 Py_ssize_t previdx;
1339 1338
1340 1339 /* parsing argument */
1341 1340 if (!PyArg_ParseTuple(args, "O!dn", &PyList_Type, &list_revs,
1342 1341 &targetdensity, &mingapsize)) {
1343 1342 goto bail;
1344 1343 }
1345 1344
1346 1345 /* If the delta chain contains a single element, we do not need slicing
1347 1346 */
1348 1347 num_revs = PyList_GET_SIZE(list_revs);
1349 1348 if (num_revs <= 1) {
1350 1349 result = PyTuple_Pack(1, list_revs);
1351 1350 goto done;
1352 1351 }
1353 1352
1354 1353 /* Turn the python list into a native integer array (for efficiency) */
1355 1354 revs = (Py_ssize_t *)calloc(num_revs, sizeof(Py_ssize_t));
1356 1355 if (revs == NULL) {
1357 1356 PyErr_NoMemory();
1358 1357 goto bail;
1359 1358 }
1360 1359 for (i = 0; i < num_revs; i++) {
1361 1360 Py_ssize_t revnum = PyInt_AsLong(PyList_GET_ITEM(list_revs, i));
1362 1361 if (revnum == -1 && PyErr_Occurred()) {
1363 1362 goto bail;
1364 1363 }
1365 1364 if (revnum < nullrev || revnum >= idxlen) {
1366 1365 PyErr_Format(PyExc_IndexError,
1367 1366 "index out of range: %zd", revnum);
1368 1367 goto bail;
1369 1368 }
1370 1369 revs[i] = revnum;
1371 1370 }
1372 1371
1373 1372 /* Compute and check various property of the unsliced delta chain */
1374 1373 deltachainspan = index_segment_span(self, revs[0], revs[num_revs - 1]);
1375 1374 if (deltachainspan < 0) {
1376 1375 goto bail;
1377 1376 }
1378 1377
1379 1378 if (deltachainspan <= mingapsize) {
1380 1379 result = PyTuple_Pack(1, list_revs);
1381 1380 goto done;
1382 1381 }
1383 1382 chainpayload = 0;
1384 1383 for (i = 0; i < num_revs; i++) {
1385 1384 int tmp = index_get_length(self, revs[i]);
1386 1385 if (tmp < 0) {
1387 1386 goto bail;
1388 1387 }
1389 1388 chainpayload += tmp;
1390 1389 }
1391 1390
1392 1391 readdata = deltachainspan;
1393 1392 density = 1.0;
1394 1393
1395 1394 if (0 < deltachainspan) {
1396 1395 density = (double)chainpayload / (double)deltachainspan;
1397 1396 }
1398 1397
1399 1398 if (density >= targetdensity) {
1400 1399 result = PyTuple_Pack(1, list_revs);
1401 1400 goto done;
1402 1401 }
1403 1402
1404 1403 /* if chain is too sparse, look for relevant gaps */
1405 1404 gaps = (struct Gap *)calloc(num_revs, sizeof(struct Gap));
1406 1405 if (gaps == NULL) {
1407 1406 PyErr_NoMemory();
1408 1407 goto bail;
1409 1408 }
1410 1409
1411 1410 previous_end = -1;
1412 1411 for (i = 0; i < num_revs; i++) {
1413 1412 int64_t revstart;
1414 1413 int revsize;
1415 1414 revstart = index_get_start(self, revs[i]);
1416 1415 if (revstart < 0) {
1417 1416 goto bail;
1418 1417 };
1419 1418 revsize = index_get_length(self, revs[i]);
1420 1419 if (revsize < 0) {
1421 1420 goto bail;
1422 1421 };
1423 1422 if (revsize == 0) {
1424 1423 continue;
1425 1424 }
1426 1425 if (previous_end >= 0) {
1427 1426 int64_t gapsize = revstart - previous_end;
1428 1427 if (gapsize > mingapsize) {
1429 1428 gaps[num_gaps].size = gapsize;
1430 1429 gaps[num_gaps].idx = i;
1431 1430 num_gaps += 1;
1432 1431 }
1433 1432 }
1434 1433 previous_end = revstart + revsize;
1435 1434 }
1436 1435 if (num_gaps == 0) {
1437 1436 result = PyTuple_Pack(1, list_revs);
1438 1437 goto done;
1439 1438 }
1440 1439 qsort(gaps, num_gaps, sizeof(struct Gap), &gap_compare);
1441 1440
1442 1441 /* Slice the largest gap first, they improve the density the most */
1443 1442 selected_indices =
1444 1443 (Py_ssize_t *)malloc((num_gaps + 1) * sizeof(Py_ssize_t));
1445 1444 if (selected_indices == NULL) {
1446 1445 PyErr_NoMemory();
1447 1446 goto bail;
1448 1447 }
1449 1448
1450 1449 for (i = num_gaps - 1; i >= 0; i--) {
1451 1450 selected_indices[num_selected] = gaps[i].idx;
1452 1451 readdata -= gaps[i].size;
1453 1452 num_selected += 1;
1454 1453 if (readdata <= 0) {
1455 1454 density = 1.0;
1456 1455 } else {
1457 1456 density = (double)chainpayload / (double)readdata;
1458 1457 }
1459 1458 if (density >= targetdensity) {
1460 1459 break;
1461 1460 }
1462 1461 }
1463 1462 qsort(selected_indices, num_selected, sizeof(Py_ssize_t),
1464 1463 &Py_ssize_t_compare);
1465 1464
1466 1465 /* create the resulting slice */
1467 1466 allchunks = PyList_New(0);
1468 1467 if (allchunks == NULL) {
1469 1468 goto bail;
1470 1469 }
1471 1470 previdx = 0;
1472 1471 selected_indices[num_selected] = num_revs;
1473 1472 for (i = 0; i <= num_selected; i++) {
1474 1473 Py_ssize_t idx = selected_indices[i];
1475 1474 Py_ssize_t endidx = trim_endidx(self, revs, previdx, idx);
1476 1475 if (endidx < 0) {
1477 1476 goto bail;
1478 1477 }
1479 1478 if (previdx < endidx) {
1480 1479 chunk = PyList_GetSlice(list_revs, previdx, endidx);
1481 1480 if (chunk == NULL) {
1482 1481 goto bail;
1483 1482 }
1484 1483 if (PyList_Append(allchunks, chunk) == -1) {
1485 1484 goto bail;
1486 1485 }
1487 1486 Py_DECREF(chunk);
1488 1487 chunk = NULL;
1489 1488 }
1490 1489 previdx = idx;
1491 1490 }
1492 1491 result = allchunks;
1493 1492 goto done;
1494 1493
1495 1494 bail:
1496 1495 Py_XDECREF(allchunks);
1497 1496 Py_XDECREF(chunk);
1498 1497 done:
1499 1498 free(revs);
1500 1499 free(gaps);
1501 1500 free(selected_indices);
1502 1501 return result;
1503 1502 }
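
A worked numeric sketch (all figures invented) of the density logic above: the payload of the chain is compared to the on-disk span it covers, and the largest gaps are skipped first until the ratio of payload to data actually read reaches the target.

#include <assert.h>
#include <stdio.h>

int main(void)
{
        /* invented figures: 400 bytes of deltas spread over a 1000-byte span */
        long long chainpayload = 400, deltachainspan = 1000;
        double targetdensity = 0.5;
        long long gaps_sorted_desc[] = {300, 150, 50}; /* notable gaps, largest first */

        long long readdata = deltachainspan;
        double density = (double)chainpayload / (double)deltachainspan; /* 0.4 */
        assert(density < targetdensity); /* too sparse, so gaps get skipped */

        int skipped = 0;
        while (density < targetdensity && skipped < 3) {
                readdata -= gaps_sorted_desc[skipped++];
                density = readdata > 0 ? (double)chainpayload / (double)readdata : 1.0;
        }
        /* after skipping the 300-byte gap: 400 / 700 ~= 0.57 >= 0.5 */
        printf("skipped %d gap(s), density %.2f\n", skipped, density);
        assert(skipped == 1);
        return 0;
}
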
1504 1503
1505 1504 static inline int nt_level(const char *node, Py_ssize_t level)
1506 1505 {
1507 1506 int v = node[level >> 1];
1508 1507 if (!(level & 1))
1509 1508 v >>= 4;
1510 1509 return v & 0xf;
1511 1510 }
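
A quick standalone check (not from parsers.c) of the nybble indexing performed by nt_level() above: even levels select the high four bits of a byte, odd levels the low four. The buffer is unsigned here purely to keep the example portable.

#include <assert.h>

static int nybble(const unsigned char *node, long level)
{
        int v = node[level >> 1];
        if (!(level & 1))
                v >>= 4;
        return v & 0xf;
}

int main(void)
{
        const unsigned char node[2] = {0xab, 0xcd};
        assert(nybble(node, 0) == 0xa); /* high half of byte 0 */
        assert(nybble(node, 1) == 0xb); /* low half of byte 0 */
        assert(nybble(node, 2) == 0xc);
        assert(nybble(node, 3) == 0xd);
        return 0;
}
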
1512 1511
1513 1512 /*
1514 1513 * Return values:
1515 1514 *
1516 1515 * -4: match is ambiguous (multiple candidates)
1517 1516 * -2: not found
1518 1517 * rest: valid rev
1519 1518 */
1520 1519 static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
1521 1520 int hex)
1522 1521 {
1523 1522 int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
1524 1523 int level, maxlevel, off;
1525 1524
1526 1525 if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
1527 1526 return -1;
1528 1527
1529 1528 if (hex)
1530 1529 maxlevel = nodelen > 40 ? 40 : (int)nodelen;
1531 1530 else
1532 1531 maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);
1533 1532
1534 1533 for (level = off = 0; level < maxlevel; level++) {
1535 1534 int k = getnybble(node, level);
1536 1535 nodetreenode *n = &self->nodes[off];
1537 1536 int v = n->children[k];
1538 1537
1539 1538 if (v < 0) {
1540 1539 const char *n;
1541 1540 Py_ssize_t i;
1542 1541
1543 1542 v = -(v + 2);
1544 1543 n = index_node(self->index, v);
1545 1544 if (n == NULL)
1546 1545 return -2;
1547 1546 for (i = level; i < maxlevel; i++)
1548 1547 if (getnybble(node, i) != nt_level(n, i))
1549 1548 return -2;
1550 1549 return v;
1551 1550 }
1552 1551 if (v == 0)
1553 1552 return -2;
1554 1553 off = v;
1555 1554 }
1556 1555 /* multiple matches against an ambiguous prefix */
1557 1556 return -4;
1558 1557 }
1559 1558
1560 1559 static int nt_new(nodetree *self)
1561 1560 {
1562 1561 if (self->length == self->capacity) {
1563 1562 unsigned newcapacity;
1564 1563 nodetreenode *newnodes;
1565 1564 newcapacity = self->capacity * 2;
1566 1565 if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
1567 1566 PyErr_SetString(PyExc_MemoryError,
1568 1567 "overflow in nt_new");
1569 1568 return -1;
1570 1569 }
1571 1570 newnodes =
1572 1571 realloc(self->nodes, newcapacity * sizeof(nodetreenode));
1573 1572 if (newnodes == NULL) {
1574 1573 PyErr_SetString(PyExc_MemoryError, "out of memory");
1575 1574 return -1;
1576 1575 }
1577 1576 self->capacity = newcapacity;
1578 1577 self->nodes = newnodes;
1579 1578 memset(&self->nodes[self->length], 0,
1580 1579 sizeof(nodetreenode) * (self->capacity - self->length));
1581 1580 }
1582 1581 return self->length++;
1583 1582 }
1584 1583
1585 1584 static int nt_insert(nodetree *self, const char *node, int rev)
1586 1585 {
1587 1586 int level = 0;
1588 1587 int off = 0;
1589 1588
1590 1589 while (level < 40) {
1591 1590 int k = nt_level(node, level);
1592 1591 nodetreenode *n;
1593 1592 int v;
1594 1593
1595 1594 n = &self->nodes[off];
1596 1595 v = n->children[k];
1597 1596
1598 1597 if (v == 0) {
1599 1598 n->children[k] = -rev - 2;
1600 1599 return 0;
1601 1600 }
1602 1601 if (v < 0) {
1603 1602 const char *oldnode =
1604 1603 index_node_existing(self->index, -(v + 2));
1605 1604 int noff;
1606 1605
1607 1606 if (oldnode == NULL)
1608 1607 return -1;
1609 1608 if (!memcmp(oldnode, node, 20)) {
1610 1609 n->children[k] = -rev - 2;
1611 1610 return 0;
1612 1611 }
1613 1612 noff = nt_new(self);
1614 1613 if (noff == -1)
1615 1614 return -1;
1616 1615 /* self->nodes may have been changed by realloc */
1617 1616 self->nodes[off].children[k] = noff;
1618 1617 off = noff;
1619 1618 n = &self->nodes[off];
1620 1619 n->children[nt_level(oldnode, ++level)] = v;
1621 1620 if (level > self->depth)
1622 1621 self->depth = level;
1623 1622 self->splits += 1;
1624 1623 } else {
1625 1624 level += 1;
1626 1625 off = v;
1627 1626 }
1628 1627 }
1629 1628
1630 1629 return -1;
1631 1630 }
1632 1631
1633 1632 static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
1634 1633 {
1635 1634 Py_ssize_t rev;
1636 1635 const char *node;
1637 1636 Py_ssize_t length;
1638 1637 if (!PyArg_ParseTuple(args, "n", &rev))
1639 1638 return NULL;
1640 1639 length = index_length(self->nt.index);
1641 1640 if (rev < 0 || rev >= length) {
1642 1641 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1643 1642 return NULL;
1644 1643 }
1645 1644 node = index_node_existing(self->nt.index, rev);
1646 1645 if (nt_insert(&self->nt, node, (int)rev) == -1)
1647 1646 return NULL;
1648 1647 Py_RETURN_NONE;
1649 1648 }
1650 1649
1651 1650 static int nt_delete_node(nodetree *self, const char *node)
1652 1651 {
1653 1652 /* rev==-2 happens to get encoded as 0, which is interpreted as not set
1654 1653 */
1655 1654 return nt_insert(self, node, -2);
1656 1655 }
1657 1656
1658 1657 static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
1659 1658 {
1660 1659 /* Initialize before overflow-checking to avoid nt_dealloc() crash. */
1661 1660 self->nodes = NULL;
1662 1661
1663 1662 self->index = index;
1664 1663 /* The input capacity is in terms of revisions, while the field is in
1665 1664 * terms of nodetree nodes. */
1666 1665 self->capacity = (capacity < 4 ? 4 : capacity / 2);
1667 1666 self->depth = 0;
1668 1667 self->splits = 0;
1669 1668 if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
1670 1669 PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
1671 1670 return -1;
1672 1671 }
1673 1672 self->nodes = calloc(self->capacity, sizeof(nodetreenode));
1674 1673 if (self->nodes == NULL) {
1675 1674 PyErr_NoMemory();
1676 1675 return -1;
1677 1676 }
1678 1677 self->length = 1;
1679 1678 return 0;
1680 1679 }
1681 1680
1682 1681 static int ntobj_init(nodetreeObject *self, PyObject *args)
1683 1682 {
1684 1683 PyObject *index;
1685 1684 unsigned capacity;
1686 1685 if (!PyArg_ParseTuple(args, "O!I", &HgRevlogIndex_Type, &index,
1687 1686 &capacity))
1688 1687 return -1;
1689 1688 Py_INCREF(index);
1690 1689 return nt_init(&self->nt, (indexObject *)index, capacity);
1691 1690 }
1692 1691
1693 1692 static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
1694 1693 {
1695 1694 return nt_find(self, node, nodelen, 1);
1696 1695 }
1697 1696
1698 1697 /*
1699 1698 * Find the length of the shortest unique prefix of node.
1700 1699 *
1701 1700 * Return values:
1702 1701 *
1703 1702 * -3: error (exception set)
1704 1703 * -2: not found (no exception set)
1705 1704 * rest: length of shortest prefix
1706 1705 */
1707 1706 static int nt_shortest(nodetree *self, const char *node)
1708 1707 {
1709 1708 int level, off;
1710 1709
1711 1710 for (level = off = 0; level < 40; level++) {
1712 1711 int k, v;
1713 1712 nodetreenode *n = &self->nodes[off];
1714 1713 k = nt_level(node, level);
1715 1714 v = n->children[k];
1716 1715 if (v < 0) {
1717 1716 const char *n;
1718 1717 v = -(v + 2);
1719 1718 n = index_node_existing(self->index, v);
1720 1719 if (n == NULL)
1721 1720 return -3;
1722 1721 if (memcmp(node, n, 20) != 0)
1723 1722 /*
1724 1723 * Found a unique prefix, but it wasn't for the
1725 1724 * requested node (i.e the requested node does
1726 1725 * not exist).
1727 1726 */
1728 1727 return -2;
1729 1728 return level + 1;
1730 1729 }
1731 1730 if (v == 0)
1732 1731 return -2;
1733 1732 off = v;
1734 1733 }
1735 1734 /*
1736 1735 * The node was still not unique after 40 hex digits, so this won't
1737 1736 * happen. Also, if we get here, then there's a programming error in
1738 1737 * this file that made us insert a node longer than 40 hex digits.
1739 1738 */
1740 1739 PyErr_SetString(PyExc_Exception, "broken node tree");
1741 1740 return -3;
1742 1741 }
1743 1742
1744 1743 static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
1745 1744 {
1746 1745 PyObject *val;
1747 1746 char *node;
1748 1747 int length;
1749 1748
1750 1749 if (!PyArg_ParseTuple(args, "O", &val))
1751 1750 return NULL;
1752 1751 if (node_check(val, &node) == -1)
1753 1752 return NULL;
1754 1753
1755 1754 length = nt_shortest(&self->nt, node);
1756 1755 if (length == -3)
1757 1756 return NULL;
1758 1757 if (length == -2) {
1759 1758 raise_revlog_error();
1760 1759 return NULL;
1761 1760 }
1762 1761 return PyInt_FromLong(length);
1763 1762 }
1764 1763
1765 1764 static void nt_dealloc(nodetree *self)
1766 1765 {
1767 1766 free(self->nodes);
1768 1767 self->nodes = NULL;
1769 1768 }
1770 1769
1771 1770 static void ntobj_dealloc(nodetreeObject *self)
1772 1771 {
1773 1772 Py_XDECREF(self->nt.index);
1774 1773 nt_dealloc(&self->nt);
1775 1774 PyObject_Del(self);
1776 1775 }
1777 1776
1778 1777 static PyMethodDef ntobj_methods[] = {
1779 1778 {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
1780 1779 "insert an index entry"},
1781 1780 {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
1782 1781 "find length of shortest hex nodeid of a binary ID"},
1783 1782 {NULL} /* Sentinel */
1784 1783 };
1785 1784
1786 1785 static PyTypeObject nodetreeType = {
1787 1786 PyVarObject_HEAD_INIT(NULL, 0) /* header */
1788 1787 "parsers.nodetree", /* tp_name */
1789 1788 sizeof(nodetreeObject), /* tp_basicsize */
1790 1789 0, /* tp_itemsize */
1791 1790 (destructor)ntobj_dealloc, /* tp_dealloc */
1792 1791 0, /* tp_print */
1793 1792 0, /* tp_getattr */
1794 1793 0, /* tp_setattr */
1795 1794 0, /* tp_compare */
1796 1795 0, /* tp_repr */
1797 1796 0, /* tp_as_number */
1798 1797 0, /* tp_as_sequence */
1799 1798 0, /* tp_as_mapping */
1800 1799 0, /* tp_hash */
1801 1800 0, /* tp_call */
1802 1801 0, /* tp_str */
1803 1802 0, /* tp_getattro */
1804 1803 0, /* tp_setattro */
1805 1804 0, /* tp_as_buffer */
1806 1805 Py_TPFLAGS_DEFAULT, /* tp_flags */
1807 1806 "nodetree", /* tp_doc */
1808 1807 0, /* tp_traverse */
1809 1808 0, /* tp_clear */
1810 1809 0, /* tp_richcompare */
1811 1810 0, /* tp_weaklistoffset */
1812 1811 0, /* tp_iter */
1813 1812 0, /* tp_iternext */
1814 1813 ntobj_methods, /* tp_methods */
1815 1814 0, /* tp_members */
1816 1815 0, /* tp_getset */
1817 1816 0, /* tp_base */
1818 1817 0, /* tp_dict */
1819 1818 0, /* tp_descr_get */
1820 1819 0, /* tp_descr_set */
1821 1820 0, /* tp_dictoffset */
1822 1821 (initproc)ntobj_init, /* tp_init */
1823 1822 0, /* tp_alloc */
1824 1823 };
1825 1824
1826 1825 static int index_init_nt(indexObject *self)
1827 1826 {
1828 1827 if (!self->ntinitialized) {
1829 1828 if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
1830 1829 nt_dealloc(&self->nt);
1831 1830 return -1;
1832 1831 }
1833 1832 if (nt_insert(&self->nt, nullid, -1) == -1) {
1834 1833 nt_dealloc(&self->nt);
1835 1834 return -1;
1836 1835 }
1837 1836 self->ntinitialized = 1;
1838 1837 self->ntrev = (int)index_length(self);
1839 1838 self->ntlookups = 1;
1840 1839 self->ntmisses = 0;
1841 1840 }
1842 1841 return 0;
1843 1842 }
1844 1843
1845 1844 /*
1846 1845 * Return values:
1847 1846 *
1848 1847 * -3: error (exception set)
1849 1848 * -2: not found (no exception set)
1850 1849 * rest: valid rev
1851 1850 */
1852 1851 static int index_find_node(indexObject *self, const char *node,
1853 1852 Py_ssize_t nodelen)
1854 1853 {
1855 1854 int rev;
1856 1855
1857 1856 if (index_init_nt(self) == -1)
1858 1857 return -3;
1859 1858
1860 1859 self->ntlookups++;
1861 1860 rev = nt_find(&self->nt, node, nodelen, 0);
1862 1861 if (rev >= -1)
1863 1862 return rev;
1864 1863
1865 1864 /*
1866 1865 * For the first handful of lookups, we scan the entire index,
1867 1866 * and cache only the matching nodes. This optimizes for cases
1868 1867 * like "hg tip", where only a few nodes are accessed.
1869 1868 *
1870 1869 * After that, we cache every node we visit, using a single
1871 1870 * scan amortized over multiple lookups. This gives the best
1872 1871 * bulk performance, e.g. for "hg log".
1873 1872 */
1874 1873 if (self->ntmisses++ < 4) {
1875 1874 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1876 1875 const char *n = index_node_existing(self, rev);
1877 1876 if (n == NULL)
1878 1877 return -3;
1879 1878 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1880 1879 if (nt_insert(&self->nt, n, rev) == -1)
1881 1880 return -3;
1882 1881 break;
1883 1882 }
1884 1883 }
1885 1884 } else {
1886 1885 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1887 1886 const char *n = index_node_existing(self, rev);
1888 1887 if (n == NULL)
1889 1888 return -3;
1890 1889 if (nt_insert(&self->nt, n, rev) == -1) {
1891 1890 self->ntrev = rev + 1;
1892 1891 return -3;
1893 1892 }
1894 1893 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1895 1894 break;
1896 1895 }
1897 1896 }
1898 1897 self->ntrev = rev;
1899 1898 }
1900 1899
1901 1900 if (rev >= 0)
1902 1901 return rev;
1903 1902 return -2;
1904 1903 }
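
/*
 * Illustrative sketch (not part of revlog.c): the two-phase memoization
 * policy described in the comment above, reduced to plain integers. For
 * the first few misses only the matching entry is cached; after that,
 * every entry walked past is cached, so one backwards scan is amortized
 * over many lookups. The names toy_index and toy_find are hypothetical,
 * and the linear cache probe below merely stands in for the radix-tree
 * lookup used by the real code.
 */
#include <stdio.h>

#define TOY_LEN 8

struct toy_index {
	int keys[TOY_LEN];   /* the "node" of each rev, here just an int */
	int cached[TOY_LEN]; /* 1 once keys[rev] has been memoized */
	int misses;          /* lookups not answered from the cache */
};

static int toy_find(struct toy_index *idx, int key)
{
	int rev;

	for (rev = TOY_LEN - 1; rev >= 0; rev--) {
		if (idx->cached[rev] && idx->keys[rev] == key)
			return rev;
	}
	if (idx->misses++ < 4) {
		/* early phase: memoize only the entry that matched */
		for (rev = TOY_LEN - 1; rev >= 0; rev--) {
			if (idx->keys[rev] == key) {
				idx->cached[rev] = 1;
				return rev;
			}
		}
	} else {
		/* bulk phase: memoize everything walked past */
		for (rev = TOY_LEN - 1; rev >= 0; rev--) {
			idx->cached[rev] = 1;
			if (idx->keys[rev] == key)
				return rev;
		}
	}
	return -2; /* not found, mirroring the convention above */
}

int main(void)
{
	struct toy_index idx = {{10, 11, 12, 13, 14, 15, 16, 17}, {0}, 0};
	printf("%d\n", toy_find(&idx, 13)); /* 3 */
	printf("%d\n", toy_find(&idx, 99)); /* -2 */
	return 0;
}
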
1905 1904
1906 1905 static PyObject *index_getitem(indexObject *self, PyObject *value)
1907 1906 {
1908 1907 char *node;
1909 1908 int rev;
1910 1909
1911 1910 if (PyInt_Check(value)) {
1912 1911 long idx;
1913 1912 if (!pylong_to_long(value, &idx)) {
1914 1913 return NULL;
1915 1914 }
1916 1915 return index_get(self, idx);
1917 1916 }
1918 1917
1919 1918 if (node_check(value, &node) == -1)
1920 1919 return NULL;
1921 1920 rev = index_find_node(self, node, 20);
1922 1921 if (rev >= -1)
1923 1922 return PyInt_FromLong(rev);
1924 1923 if (rev == -2)
1925 1924 raise_revlog_error();
1926 1925 return NULL;
1927 1926 }
1928 1927
1929 1928 /*
1930 1929 * Fully populate the radix tree.
1931 1930 */
1932 1931 static int index_populate_nt(indexObject *self)
1933 1932 {
1934 1933 int rev;
1935 1934 if (self->ntrev > 0) {
1936 1935 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1937 1936 const char *n = index_node_existing(self, rev);
1938 1937 if (n == NULL)
1939 1938 return -1;
1940 1939 if (nt_insert(&self->nt, n, rev) == -1)
1941 1940 return -1;
1942 1941 }
1943 1942 self->ntrev = -1;
1944 1943 }
1945 1944 return 0;
1946 1945 }
1947 1946
1948 1947 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
1949 1948 {
1950 1949 const char *fullnode;
1951 1950 Py_ssize_t nodelen;
1952 1951 char *node;
1953 1952 int rev, i;
1954 1953
1955 1954 if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
1956 1955 return NULL;
1957 1956
1958 1957 if (nodelen < 1) {
1959 1958 PyErr_SetString(PyExc_ValueError, "key too short");
1960 1959 return NULL;
1961 1960 }
1962 1961
1963 1962 if (nodelen > 40) {
1964 1963 PyErr_SetString(PyExc_ValueError, "key too long");
1965 1964 return NULL;
1966 1965 }
1967 1966
1968 1967 for (i = 0; i < nodelen; i++)
1969 1968 hexdigit(node, i);
1970 1969 if (PyErr_Occurred()) {
1971 1970 /* input contains non-hex characters */
1972 1971 PyErr_Clear();
1973 1972 Py_RETURN_NONE;
1974 1973 }
1975 1974
1976 1975 if (index_init_nt(self) == -1)
1977 1976 return NULL;
1978 1977 if (index_populate_nt(self) == -1)
1979 1978 return NULL;
1980 1979 rev = nt_partialmatch(&self->nt, node, nodelen);
1981 1980
1982 1981 switch (rev) {
1983 1982 case -4:
1984 1983 raise_revlog_error();
1985 1984 return NULL;
1986 1985 case -2:
1987 1986 Py_RETURN_NONE;
1988 1987 case -1:
1989 1988 return PyBytes_FromStringAndSize(nullid, 20);
1990 1989 }
1991 1990
1992 1991 fullnode = index_node_existing(self, rev);
1993 1992 if (fullnode == NULL) {
1994 1993 return NULL;
1995 1994 }
1996 1995 return PyBytes_FromStringAndSize(fullnode, 20);
1997 1996 }
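
/*
 * Illustrative sketch (not part of revlog.c): the prefix validation that
 * index_partialmatch performs before consulting the radix tree. A lookup
 * key must be 1..40 hex digits; a bad length is an error, while non-hex
 * characters simply mean "cannot match any node". check_hex_prefix is a
 * hypothetical name.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* 1: plausible prefix, 0: cannot match any node, -1: invalid length */
static int check_hex_prefix(const char *key)
{
	size_t i, len = strlen(key);

	if (len < 1 || len > 40)
		return -1;
	for (i = 0; i < len; i++) {
		if (!isxdigit((unsigned char)key[i]))
			return 0;
	}
	return 1;
}

int main(void)
{
	printf("%d\n", check_hex_prefix("1a2b3c")); /* 1 */
	printf("%d\n", check_hex_prefix("xyz"));    /* 0 */
	printf("%d\n", check_hex_prefix(""));       /* -1 */
	return 0;
}
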
1998 1997
1999 1998 static PyObject *index_shortest(indexObject *self, PyObject *args)
2000 1999 {
2001 2000 PyObject *val;
2002 2001 char *node;
2003 2002 int length;
2004 2003
2005 2004 if (!PyArg_ParseTuple(args, "O", &val))
2006 2005 return NULL;
2007 2006 if (node_check(val, &node) == -1)
2008 2007 return NULL;
2009 2008
2010 2009 self->ntlookups++;
2011 2010 if (index_init_nt(self) == -1)
2012 2011 return NULL;
2013 2012 if (index_populate_nt(self) == -1)
2014 2013 return NULL;
2015 2014 length = nt_shortest(&self->nt, node);
2016 2015 if (length == -3)
2017 2016 return NULL;
2018 2017 if (length == -2) {
2019 2018 raise_revlog_error();
2020 2019 return NULL;
2021 2020 }
2022 2021 return PyInt_FromLong(length);
2023 2022 }
2024 2023
2025 2024 static PyObject *index_m_get(indexObject *self, PyObject *args)
2026 2025 {
2027 2026 PyObject *val;
2028 2027 char *node;
2029 2028 int rev;
2030 2029
2031 2030 if (!PyArg_ParseTuple(args, "O", &val))
2032 2031 return NULL;
2033 2032 if (node_check(val, &node) == -1)
2034 2033 return NULL;
2035 2034 rev = index_find_node(self, node, 20);
2036 2035 if (rev == -3)
2037 2036 return NULL;
2038 2037 if (rev == -2)
2039 2038 Py_RETURN_NONE;
2040 2039 return PyInt_FromLong(rev);
2041 2040 }
2042 2041
2043 2042 static int index_contains(indexObject *self, PyObject *value)
2044 2043 {
2045 2044 char *node;
2046 2045
2047 2046 if (PyInt_Check(value)) {
2048 2047 long rev;
2049 2048 if (!pylong_to_long(value, &rev)) {
2050 2049 return -1;
2051 2050 }
2052 2051 return rev >= -1 && rev < index_length(self);
2053 2052 }
2054 2053
2055 2054 if (node_check(value, &node) == -1)
2056 2055 return -1;
2057 2056
2058 2057 switch (index_find_node(self, node, 20)) {
2059 2058 case -3:
2060 2059 return -1;
2061 2060 case -2:
2062 2061 return 0;
2063 2062 default:
2064 2063 return 1;
2065 2064 }
2066 2065 }
2067 2066
2068 2067 static PyObject *index_m_has_node(indexObject *self, PyObject *args)
2069 2068 {
2070 2069 int ret = index_contains(self, args);
2071 2070 if (ret < 0)
2072 2071 return NULL;
2073 2072 return PyBool_FromLong((long)ret);
2074 2073 }
2075 2074
2076 2075 static PyObject *index_m_rev(indexObject *self, PyObject *val)
2077 2076 {
2078 2077 char *node;
2079 2078 int rev;
2080 2079
2081 2080 if (node_check(val, &node) == -1)
2082 2081 return NULL;
2083 2082 rev = index_find_node(self, node, 20);
2084 2083 if (rev >= -1)
2085 2084 return PyInt_FromLong(rev);
2086 2085 if (rev == -2)
2087 2086 raise_revlog_error();
2088 2087 return NULL;
2089 2088 }
2090 2089
2091 2090 typedef uint64_t bitmask;
2092 2091
2093 2092 /*
2094 2093 * Given a disjoint set of revs, return all candidates for the
2095 2094 * greatest common ancestor. In revset notation, this is the set
2096 2095 * "heads(::a and ::b and ...)"
2097 2096 */
2098 2097 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
2099 2098 int revcount)
2100 2099 {
2101 2100 const bitmask allseen = (1ull << revcount) - 1;
2102 2101 const bitmask poison = 1ull << revcount;
2103 2102 PyObject *gca = PyList_New(0);
2104 2103 int i, v, interesting;
2105 2104 int maxrev = -1;
2106 2105 bitmask sp;
2107 2106 bitmask *seen;
2108 2107
2109 2108 if (gca == NULL)
2110 2109 return PyErr_NoMemory();
2111 2110
2112 2111 for (i = 0; i < revcount; i++) {
2113 2112 if (revs[i] > maxrev)
2114 2113 maxrev = revs[i];
2115 2114 }
2116 2115
2117 2116 seen = calloc(sizeof(*seen), maxrev + 1);
2118 2117 if (seen == NULL) {
2119 2118 Py_DECREF(gca);
2120 2119 return PyErr_NoMemory();
2121 2120 }
2122 2121
2123 2122 for (i = 0; i < revcount; i++)
2124 2123 seen[revs[i]] = 1ull << i;
2125 2124
2126 2125 interesting = revcount;
2127 2126
2128 2127 for (v = maxrev; v >= 0 && interesting; v--) {
2129 2128 bitmask sv = seen[v];
2130 2129 int parents[2];
2131 2130
2132 2131 if (!sv)
2133 2132 continue;
2134 2133
2135 2134 if (sv < poison) {
2136 2135 interesting -= 1;
2137 2136 if (sv == allseen) {
2138 2137 PyObject *obj = PyInt_FromLong(v);
2139 2138 if (obj == NULL)
2140 2139 goto bail;
2141 2140 if (PyList_Append(gca, obj) == -1) {
2142 2141 Py_DECREF(obj);
2143 2142 goto bail;
2144 2143 }
2145 2144 sv |= poison;
2146 2145 for (i = 0; i < revcount; i++) {
2147 2146 if (revs[i] == v)
2148 2147 goto done;
2149 2148 }
2150 2149 }
2151 2150 }
2152 2151 if (index_get_parents(self, v, parents, maxrev) < 0)
2153 2152 goto bail;
2154 2153
2155 2154 for (i = 0; i < 2; i++) {
2156 2155 int p = parents[i];
2157 2156 if (p == -1)
2158 2157 continue;
2159 2158 sp = seen[p];
2160 2159 if (sv < poison) {
2161 2160 if (sp == 0) {
2162 2161 seen[p] = sv;
2163 2162 interesting++;
2164 2163 } else if (sp != sv)
2165 2164 seen[p] |= sv;
2166 2165 } else {
2167 2166 if (sp && sp < poison)
2168 2167 interesting--;
2169 2168 seen[p] = sv;
2170 2169 }
2171 2170 }
2172 2171 }
2173 2172
2174 2173 done:
2175 2174 free(seen);
2176 2175 return gca;
2177 2176 bail:
2178 2177 free(seen);
2179 2178 Py_XDECREF(gca);
2180 2179 return NULL;
2181 2180 }
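
/*
 * Illustrative sketch (not part of revlog.c): the bitmask propagation
 * behind find_gca_candidates, on a tiny hard-coded DAG and without the
 * "interesting" early-termination bookkeeping. Each input rev owns one
 * bit; a rev whose seen-set equals allseen is an ancestor of every input,
 * and the poison bit keeps its own ancestors from also being reported, so
 * only the heads of the common-ancestor set survive. The DAG below and
 * all names are hypothetical.
 */
#include <stdio.h>

#define NREVS 5

/* parents[rev][0..1]; -1 means "no parent"; parents have lower revs */
static const int parents[NREVS][2] = {
    {-1, -1}, /* 0: root */
    {0, -1},  /* 1 */
    {0, -1},  /* 2 */
    {1, 2},   /* 3 */
    {1, 2},   /* 4 */
};

int main(void)
{
	const int revs[] = {3, 4};
	const int revcount = 2;
	const unsigned long allseen = (1ul << revcount) - 1;
	const unsigned long poison = 1ul << revcount;
	unsigned long seen[NREVS] = {0};
	int i, v;

	for (i = 0; i < revcount; i++)
		seen[revs[i]] = 1ul << i;

	for (v = NREVS - 1; v >= 0; v--) {
		unsigned long sv = seen[v];
		if (!sv)
			continue;
		if (sv < poison && sv == allseen) {
			printf("gca candidate: %d\n", v);
			sv |= poison; /* poison the ancestors of a candidate */
		}
		for (i = 0; i < 2; i++) {
			int p = parents[v][i];
			if (p == -1)
				continue;
			if (sv < poison)
				seen[p] |= sv; /* merge seen-sets */
			else
				seen[p] = sv;  /* poison overrides everything */
		}
	}
	return 0; /* prints "gca candidate: 2" then "gca candidate: 1" */
}
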
2182 2181
2183 2182 /*
2184 2183 * Given a disjoint set of revs, return the subset with the longest
2185 2184 * path to the root.
2186 2185 */
2187 2186 static PyObject *find_deepest(indexObject *self, PyObject *revs)
2188 2187 {
2189 2188 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
2190 2189 static const Py_ssize_t capacity = 24;
2191 2190 int *depth, *interesting = NULL;
2192 2191 int i, j, v, ninteresting;
2193 2192 PyObject *dict = NULL, *keys = NULL;
2194 2193 long *seen = NULL;
2195 2194 int maxrev = -1;
2196 2195 long final;
2197 2196
2198 2197 if (revcount > capacity) {
2199 2198 PyErr_Format(PyExc_OverflowError,
2200 2199 "bitset size (%ld) > capacity (%ld)",
2201 2200 (long)revcount, (long)capacity);
2202 2201 return NULL;
2203 2202 }
2204 2203
2205 2204 for (i = 0; i < revcount; i++) {
2206 2205 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2207 2206 if (n > maxrev)
2208 2207 maxrev = n;
2209 2208 }
2210 2209
2211 2210 depth = calloc(sizeof(*depth), maxrev + 1);
2212 2211 if (depth == NULL)
2213 2212 return PyErr_NoMemory();
2214 2213
2215 2214 seen = calloc(sizeof(*seen), maxrev + 1);
2216 2215 if (seen == NULL) {
2217 2216 PyErr_NoMemory();
2218 2217 goto bail;
2219 2218 }
2220 2219
2221 2220 interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
2222 2221 if (interesting == NULL) {
2223 2222 PyErr_NoMemory();
2224 2223 goto bail;
2225 2224 }
2226 2225
2227 2226 if (PyList_Sort(revs) == -1)
2228 2227 goto bail;
2229 2228
2230 2229 for (i = 0; i < revcount; i++) {
2231 2230 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2232 2231 long b = 1l << i;
2233 2232 depth[n] = 1;
2234 2233 seen[n] = b;
2235 2234 interesting[b] = 1;
2236 2235 }
2237 2236
2238 2237 /* invariant: ninteresting is the number of non-zero entries in
2239 2238 * interesting. */
2240 2239 ninteresting = (int)revcount;
2241 2240
2242 2241 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
2243 2242 int dv = depth[v];
2244 2243 int parents[2];
2245 2244 long sv;
2246 2245
2247 2246 if (dv == 0)
2248 2247 continue;
2249 2248
2250 2249 sv = seen[v];
2251 2250 if (index_get_parents(self, v, parents, maxrev) < 0)
2252 2251 goto bail;
2253 2252
2254 2253 for (i = 0; i < 2; i++) {
2255 2254 int p = parents[i];
2256 2255 long sp;
2257 2256 int dp;
2258 2257
2259 2258 if (p == -1)
2260 2259 continue;
2261 2260
2262 2261 dp = depth[p];
2263 2262 sp = seen[p];
2264 2263 if (dp <= dv) {
2265 2264 depth[p] = dv + 1;
2266 2265 if (sp != sv) {
2267 2266 interesting[sv] += 1;
2268 2267 seen[p] = sv;
2269 2268 if (sp) {
2270 2269 interesting[sp] -= 1;
2271 2270 if (interesting[sp] == 0)
2272 2271 ninteresting -= 1;
2273 2272 }
2274 2273 }
2275 2274 } else if (dv == dp - 1) {
2276 2275 long nsp = sp | sv;
2277 2276 if (nsp == sp)
2278 2277 continue;
2279 2278 seen[p] = nsp;
2280 2279 interesting[sp] -= 1;
2281 2280 if (interesting[sp] == 0)
2282 2281 ninteresting -= 1;
2283 2282 if (interesting[nsp] == 0)
2284 2283 ninteresting += 1;
2285 2284 interesting[nsp] += 1;
2286 2285 }
2287 2286 }
2288 2287 interesting[sv] -= 1;
2289 2288 if (interesting[sv] == 0)
2290 2289 ninteresting -= 1;
2291 2290 }
2292 2291
2293 2292 final = 0;
2294 2293 j = ninteresting;
2295 2294 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
2296 2295 if (interesting[i] == 0)
2297 2296 continue;
2298 2297 final |= i;
2299 2298 j -= 1;
2300 2299 }
2301 2300 if (final == 0) {
2302 2301 keys = PyList_New(0);
2303 2302 goto bail;
2304 2303 }
2305 2304
2306 2305 dict = PyDict_New();
2307 2306 if (dict == NULL)
2308 2307 goto bail;
2309 2308
2310 2309 for (i = 0; i < revcount; i++) {
2311 2310 PyObject *key;
2312 2311
2313 2312 if ((final & (1 << i)) == 0)
2314 2313 continue;
2315 2314
2316 2315 key = PyList_GET_ITEM(revs, i);
2317 2316 Py_INCREF(key);
2318 2317 Py_INCREF(Py_None);
2319 2318 if (PyDict_SetItem(dict, key, Py_None) == -1) {
2320 2319 Py_DECREF(key);
2321 2320 Py_DECREF(Py_None);
2322 2321 goto bail;
2323 2322 }
2324 2323 }
2325 2324
2326 2325 keys = PyDict_Keys(dict);
2327 2326
2328 2327 bail:
2329 2328 free(depth);
2330 2329 free(seen);
2331 2330 free(interesting);
2332 2331 Py_XDECREF(dict);
2333 2332
2334 2333 return keys;
2335 2334 }
2336 2335
2337 2336 /*
2338 2337 * Given a (possibly overlapping) set of revs, return all the
2339 2338 * common ancestors heads: heads(::args[0] and ::args[1] and ...)
2340 2339 */
2341 2340 static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
2342 2341 {
2343 2342 PyObject *ret = NULL;
2344 2343 Py_ssize_t argcount, i, len;
2345 2344 bitmask repeat = 0;
2346 2345 int revcount = 0;
2347 2346 int *revs;
2348 2347
2349 2348 argcount = PySequence_Length(args);
2350 2349 revs = PyMem_Malloc(argcount * sizeof(*revs));
2351 2350 if (argcount > 0 && revs == NULL)
2352 2351 return PyErr_NoMemory();
2353 2352 len = index_length(self);
2354 2353
2355 2354 for (i = 0; i < argcount; i++) {
2356 2355 static const int capacity = 24;
2357 2356 PyObject *obj = PySequence_GetItem(args, i);
2358 2357 bitmask x;
2359 2358 long val;
2360 2359
2361 2360 if (!PyInt_Check(obj)) {
2362 2361 PyErr_SetString(PyExc_TypeError,
2363 2362 "arguments must all be ints");
2364 2363 Py_DECREF(obj);
2365 2364 goto bail;
2366 2365 }
2367 2366 val = PyInt_AsLong(obj);
2368 2367 Py_DECREF(obj);
2369 2368 if (val == -1) {
2370 2369 ret = PyList_New(0);
2371 2370 goto done;
2372 2371 }
2373 2372 if (val < 0 || val >= len) {
2374 2373 PyErr_SetString(PyExc_IndexError, "index out of range");
2375 2374 goto bail;
2376 2375 }
2377 2376 /* this cheesy bloom filter lets us avoid some more
2378 2377 * expensive duplicate checks in the common set-is-disjoint
2379 2378 * case */
2380 2379 x = 1ull << (val & 0x3f);
2381 2380 if (repeat & x) {
2382 2381 int k;
2383 2382 for (k = 0; k < revcount; k++) {
2384 2383 if (val == revs[k])
2385 2384 goto duplicate;
2386 2385 }
2387 2386 } else
2388 2387 repeat |= x;
2389 2388 if (revcount >= capacity) {
2390 2389 PyErr_Format(PyExc_OverflowError,
2391 2390 "bitset size (%d) > capacity (%d)",
2392 2391 revcount, capacity);
2393 2392 goto bail;
2394 2393 }
2395 2394 revs[revcount++] = (int)val;
2396 2395 duplicate:;
2397 2396 }
2398 2397
2399 2398 if (revcount == 0) {
2400 2399 ret = PyList_New(0);
2401 2400 goto done;
2402 2401 }
2403 2402 if (revcount == 1) {
2404 2403 PyObject *obj;
2405 2404 ret = PyList_New(1);
2406 2405 if (ret == NULL)
2407 2406 goto bail;
2408 2407 obj = PyInt_FromLong(revs[0]);
2409 2408 if (obj == NULL)
2410 2409 goto bail;
2411 2410 PyList_SET_ITEM(ret, 0, obj);
2412 2411 goto done;
2413 2412 }
2414 2413
2415 2414 ret = find_gca_candidates(self, revs, revcount);
2416 2415 if (ret == NULL)
2417 2416 goto bail;
2418 2417
2419 2418 done:
2420 2419 PyMem_Free(revs);
2421 2420 return ret;
2422 2421
2423 2422 bail:
2424 2423 PyMem_Free(revs);
2425 2424 Py_XDECREF(ret);
2426 2425 return NULL;
2427 2426 }
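
/*
 * Illustrative sketch (not part of revlog.c): the one-word bloom filter
 * used above to cheapen duplicate detection. Each value hashes to one of
 * 64 bits (val & 0x3f); only when that bit has already been seen do we
 * fall back to the exact O(n) comparison, so fully disjoint inputs never
 * pay for it. dedup_append is a hypothetical name.
 */
#include <stdint.h>
#include <stdio.h>

/* Append val to out[] unless it is already present; return the new count. */
static int dedup_append(long val, long *out, int count, uint64_t *filter)
{
	uint64_t bit = 1ull << (val & 0x3f);

	if (*filter & bit) {
		int k;
		for (k = 0; k < count; k++)
			if (out[k] == val)
				return count; /* genuine duplicate */
	} else {
		*filter |= bit;
	}
	out[count] = val;
	return count + 1;
}

int main(void)
{
	long in[] = {7, 12, 7, 71, 12}; /* 71 collides with 7 in the filter */
	long out[8];
	uint64_t filter = 0;
	int i, n = 0;

	for (i = 0; i < 5; i++)
		n = dedup_append(in[i], out, n, &filter);
	for (i = 0; i < n; i++)
		printf("%ld ", out[i]); /* 7 12 71 */
	printf("\n");
	return 0;
}
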
2428 2427
2429 2428 /*
2430 2429 * Given a (possibly overlapping) set of revs, return the greatest
2431 2430 * common ancestors: those with the longest path to the root.
2432 2431 */
2433 2432 static PyObject *index_ancestors(indexObject *self, PyObject *args)
2434 2433 {
2435 2434 PyObject *ret;
2436 2435 PyObject *gca = index_commonancestorsheads(self, args);
2437 2436 if (gca == NULL)
2438 2437 return NULL;
2439 2438
2440 2439 if (PyList_GET_SIZE(gca) <= 1) {
2441 2440 return gca;
2442 2441 }
2443 2442
2444 2443 ret = find_deepest(self, gca);
2445 2444 Py_DECREF(gca);
2446 2445 return ret;
2447 2446 }
2448 2447
2449 2448 /*
2450 2449 * Invalidate any trie entries introduced by added revs.
2451 2450 */
2452 2451 static void index_invalidate_added(indexObject *self, Py_ssize_t start)
2453 2452 {
2454 2453 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
2455 2454
2456 2455 for (i = start; i < len; i++) {
2457 2456 PyObject *tuple = PyList_GET_ITEM(self->added, i);
2458 2457 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
2459 2458
2460 2459 nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
2461 2460 }
2462 2461
2463 2462 if (start == 0)
2464 2463 Py_CLEAR(self->added);
2465 2464 }
2466 2465
2467 2466 /*
2468 2467 * Delete a numeric range of revs, which must be at the end of the
2469 * range, but exclude the sentinel nullid entry.
2468 * range.
2470 2469 */
2471 2470 static int index_slice_del(indexObject *self, PyObject *item)
2472 2471 {
2473 2472 Py_ssize_t start, stop, step, slicelength;
2474 2473 Py_ssize_t length = index_length(self) + 1;
2475 2474 int ret = 0;
2476 2475
2477 2476 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
2478 2477 #ifdef IS_PY3K
2479 2478 if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
2480 2479 &slicelength) < 0)
2481 2480 #else
2482 2481 if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
2483 2482 &step, &slicelength) < 0)
2484 2483 #endif
2485 2484 return -1;
2486 2485
2487 2486 if (slicelength <= 0)
2488 2487 return 0;
2489 2488
2490 2489 if ((step < 0 && start < stop) || (step > 0 && start > stop))
2491 2490 stop = start;
2492 2491
2493 2492 if (step < 0) {
2494 2493 stop = start + 1;
2495 2494 start = stop + step * (slicelength - 1) - 1;
2496 2495 step = -step;
2497 2496 }
2498 2497
2499 2498 if (step != 1) {
2500 2499 PyErr_SetString(PyExc_ValueError,
2501 2500 "revlog index delete requires step size of 1");
2502 2501 return -1;
2503 2502 }
2504 2503
2505 2504 if (stop != length - 1) {
2506 2505 PyErr_SetString(PyExc_IndexError,
2507 2506 "revlog index deletion indices are invalid");
2508 2507 return -1;
2509 2508 }
2510 2509
2511 2510 if (start < self->length) {
2512 2511 if (self->ntinitialized) {
2513 2512 Py_ssize_t i;
2514 2513
2515 2514 for (i = start; i < self->length; i++) {
2516 2515 const char *node = index_node_existing(self, i);
2517 2516 if (node == NULL)
2518 2517 return -1;
2519 2518
2520 2519 nt_delete_node(&self->nt, node);
2521 2520 }
2522 2521 if (self->added)
2523 2522 index_invalidate_added(self, 0);
2524 2523 if (self->ntrev > start)
2525 2524 self->ntrev = (int)start;
2526 2525 }
2527 2526 self->length = start;
2528 2527 if (start < self->raw_length) {
2529 2528 if (self->cache) {
2530 2529 Py_ssize_t i;
2531 2530 for (i = start; i < self->raw_length; i++)
2532 2531 Py_CLEAR(self->cache[i]);
2533 2532 }
2534 2533 self->raw_length = start;
2535 2534 }
2536 2535 goto done;
2537 2536 }
2538 2537
2539 2538 if (self->ntinitialized) {
2540 2539 index_invalidate_added(self, start - self->length);
2541 2540 if (self->ntrev > start)
2542 2541 self->ntrev = (int)start;
2543 2542 }
2544 2543 if (self->added)
2545 2544 ret = PyList_SetSlice(self->added, start - self->length,
2546 2545 PyList_GET_SIZE(self->added), NULL);
2547 2546 done:
2548 2547 Py_CLEAR(self->headrevs);
2549 2548 return ret;
2550 2549 }
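
/*
 * Illustrative sketch (not part of revlog.c): the shape of slice that
 * index_slice_del accepts, reduced to plain integers rather than a real
 * Python slice object. Only a step-1 slice that runs through the end of
 * the index may be deleted, so the operation is always "truncate to
 * start". tail_slice_start is a hypothetical name.
 */
#include <stdio.h>

/*
 * Return the truncation point for deleting index[start:stop:step] from an
 * index of the given length, or -1 if the slice is not a step-1 tail.
 */
static long tail_slice_start(long start, long stop, long step, long length)
{
	if (step != 1)
		return -1; /* "requires step size of 1" */
	if (stop != length)
		return -1; /* deletion must reach the end */
	if (start < 0 || start > length)
		return -1;
	return start;      /* keep revs [0, start), drop the rest */
}

int main(void)
{
	printf("%ld\n", tail_slice_start(10, 15, 1, 15)); /* 10 */
	printf("%ld\n", tail_slice_start(3, 10, 1, 15));  /* -1: not a tail */
	printf("%ld\n", tail_slice_start(10, 15, 2, 15)); /* -1: step != 1 */
	return 0;
}
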
2551 2550
2552 2551 /*
2553 2552 * Supported ops:
2554 2553 *
2555 2554 * slice deletion
2556 2555 * string assignment (extend node->rev mapping)
2557 2556 * string deletion (shrink node->rev mapping)
2558 2557 */
2559 2558 static int index_assign_subscript(indexObject *self, PyObject *item,
2560 2559 PyObject *value)
2561 2560 {
2562 2561 char *node;
2563 2562 long rev;
2564 2563
2565 2564 if (PySlice_Check(item) && value == NULL)
2566 2565 return index_slice_del(self, item);
2567 2566
2568 2567 if (node_check(item, &node) == -1)
2569 2568 return -1;
2570 2569
2571 2570 if (value == NULL)
2572 2571 return self->ntinitialized ? nt_delete_node(&self->nt, node)
2573 2572 : 0;
2574 2573 rev = PyInt_AsLong(value);
2575 2574 if (rev > INT_MAX || rev < 0) {
2576 2575 if (!PyErr_Occurred())
2577 2576 PyErr_SetString(PyExc_ValueError, "rev out of range");
2578 2577 return -1;
2579 2578 }
2580 2579
2581 2580 if (index_init_nt(self) == -1)
2582 2581 return -1;
2583 2582 return nt_insert(&self->nt, node, (int)rev);
2584 2583 }
2585 2584
2586 2585 /*
2587 2586 * Find all RevlogNG entries in an index that has inline data. Update
2588 2587 * the optional "offsets" table with those entries.
2589 2588 */
2590 2589 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
2591 2590 {
2592 2591 const char *data = (const char *)self->buf.buf;
2593 2592 Py_ssize_t pos = 0;
2594 2593 Py_ssize_t end = self->buf.len;
2595 2594 long incr = v1_hdrsize;
2596 2595 Py_ssize_t len = 0;
2597 2596
2598 2597 while (pos + v1_hdrsize <= end && pos >= 0) {
2599 2598 uint32_t comp_len;
2600 2599 /* 3rd element of header is length of compressed inline data */
2601 2600 comp_len = getbe32(data + pos + 8);
2602 2601 incr = v1_hdrsize + comp_len;
2603 2602 if (offsets)
2604 2603 offsets[len] = data + pos;
2605 2604 len++;
2606 2605 pos += incr;
2607 2606 }
2608 2607
2609 2608 if (pos != end) {
2610 2609 if (!PyErr_Occurred())
2611 2610 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2612 2611 return -1;
2613 2612 }
2614 2613
2615 2614 return len;
2616 2615 }
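
/*
 * Illustrative sketch (not part of revlog.c): how an inline revlog index
 * is walked. Every record is a fixed-size header immediately followed by
 * comp_len bytes of inline revision data, where comp_len is the
 * big-endian 32-bit field at byte offset 8 of the header. The 16-byte
 * HDRSIZE below is an arbitrary stand-in for v1_hdrsize, and
 * count_records is a hypothetical name.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDRSIZE 16 /* stand-in for the real header size */

static uint32_t be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/* Return the number of records, or -1 if the buffer is truncated. */
static long count_records(const unsigned char *data, size_t size)
{
	size_t pos = 0;
	long len = 0;

	while (pos + HDRSIZE <= size) {
		uint32_t comp_len = be32(data + pos + 8);
		pos += HDRSIZE + comp_len;
		len++;
	}
	return pos == size ? len : -1;
}

int main(void)
{
	/* two records: headers of HDRSIZE bytes, payloads of 4 and 0 bytes */
	unsigned char buf[HDRSIZE + 4 + HDRSIZE];

	memset(buf, 0, sizeof(buf));
	buf[8 + 3] = 4; /* first record: comp_len = 4, big-endian */
	printf("%ld\n", count_records(buf, sizeof(buf))); /* 2 */
	return 0;
}
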
2617 2616
2618 2617 static int index_init(indexObject *self, PyObject *args)
2619 2618 {
2620 2619 PyObject *data_obj, *inlined_obj;
2621 2620 Py_ssize_t size;
2622 2621
2623 2622 /* Initialize before argument-checking to avoid index_dealloc() crash.
2624 2623 */
2625 2624 self->raw_length = 0;
2626 2625 self->added = NULL;
2627 2626 self->cache = NULL;
2628 2627 self->data = NULL;
2629 2628 memset(&self->buf, 0, sizeof(self->buf));
2630 2629 self->headrevs = NULL;
2631 2630 self->filteredrevs = Py_None;
2632 2631 Py_INCREF(Py_None);
2633 2632 self->ntinitialized = 0;
2634 2633 self->offsets = NULL;
2635 2634
2636 2635 if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
2637 2636 return -1;
2638 2637 if (!PyObject_CheckBuffer(data_obj)) {
2639 2638 PyErr_SetString(PyExc_TypeError,
2640 2639 "data does not support buffer interface");
2641 2640 return -1;
2642 2641 }
2643 2642
2644 2643 if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
2645 2644 return -1;
2646 2645 size = self->buf.len;
2647 2646
2648 2647 self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
2649 2648 self->data = data_obj;
2650 2649
2651 2650 self->ntlookups = self->ntmisses = 0;
2652 2651 self->ntrev = -1;
2653 2652 Py_INCREF(self->data);
2654 2653
2655 2654 if (self->inlined) {
2656 2655 Py_ssize_t len = inline_scan(self, NULL);
2657 2656 if (len == -1)
2658 2657 goto bail;
2659 2658 self->raw_length = len;
2660 2659 self->length = len;
2661 2660 } else {
2662 2661 if (size % v1_hdrsize) {
2663 2662 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2664 2663 goto bail;
2665 2664 }
2666 2665 self->raw_length = size / v1_hdrsize;
2667 2666 self->length = self->raw_length;
2668 2667 }
2669 2668
2670 2669 return 0;
2671 2670 bail:
2672 2671 return -1;
2673 2672 }
2674 2673
2675 2674 static PyObject *index_nodemap(indexObject *self)
2676 2675 {
2677 2676 Py_INCREF(self);
2678 2677 return (PyObject *)self;
2679 2678 }
2680 2679
2681 2680 static void _index_clearcaches(indexObject *self)
2682 2681 {
2683 2682 if (self->cache) {
2684 2683 Py_ssize_t i;
2685 2684
2686 2685 for (i = 0; i < self->raw_length; i++)
2687 2686 Py_CLEAR(self->cache[i]);
2688 2687 free(self->cache);
2689 2688 self->cache = NULL;
2690 2689 }
2691 2690 if (self->offsets) {
2692 2691 PyMem_Free((void *)self->offsets);
2693 2692 self->offsets = NULL;
2694 2693 }
2695 2694 if (self->ntinitialized) {
2696 2695 nt_dealloc(&self->nt);
2697 2696 }
2698 2697 self->ntinitialized = 0;
2699 2698 Py_CLEAR(self->headrevs);
2700 2699 }
2701 2700
2702 2701 static PyObject *index_clearcaches(indexObject *self)
2703 2702 {
2704 2703 _index_clearcaches(self);
2705 2704 self->ntrev = -1;
2706 2705 self->ntlookups = self->ntmisses = 0;
2707 2706 Py_RETURN_NONE;
2708 2707 }
2709 2708
2710 2709 static void index_dealloc(indexObject *self)
2711 2710 {
2712 2711 _index_clearcaches(self);
2713 2712 Py_XDECREF(self->filteredrevs);
2714 2713 if (self->buf.buf) {
2715 2714 PyBuffer_Release(&self->buf);
2716 2715 memset(&self->buf, 0, sizeof(self->buf));
2717 2716 }
2718 2717 Py_XDECREF(self->data);
2719 2718 Py_XDECREF(self->added);
2720 2719 PyObject_Del(self);
2721 2720 }
2722 2721
2723 2722 static PySequenceMethods index_sequence_methods = {
2724 2723 (lenfunc)index_length, /* sq_length */
2725 2724 0, /* sq_concat */
2726 2725 0, /* sq_repeat */
2727 2726 (ssizeargfunc)index_get, /* sq_item */
2728 2727 0, /* sq_slice */
2729 2728 0, /* sq_ass_item */
2730 2729 0, /* sq_ass_slice */
2731 2730 (objobjproc)index_contains, /* sq_contains */
2732 2731 };
2733 2732
2734 2733 static PyMappingMethods index_mapping_methods = {
2735 2734 (lenfunc)index_length, /* mp_length */
2736 2735 (binaryfunc)index_getitem, /* mp_subscript */
2737 2736 (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
2738 2737 };
2739 2738
2740 2739 static PyMethodDef index_methods[] = {
2741 2740 {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
2742 2741 "return the gca set of the given revs"},
2743 2742 {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
2744 2743 METH_VARARGS,
2745 2744 "return the heads of the common ancestors of the given revs"},
2746 2745 {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
2747 2746 "clear the index caches"},
2748 2747 {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
2749 2748 {"get_rev", (PyCFunction)index_m_get, METH_VARARGS,
2750 2749 "return `rev` associated with a node or None"},
2751 2750 {"has_node", (PyCFunction)index_m_has_node, METH_O,
2752 2751 "return True if the node exist in the index"},
2753 2752 {"rev", (PyCFunction)index_m_rev, METH_O,
2754 2753 "return `rev` associated with a node or raise RevlogError"},
2755 2754 {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
2756 2755 "compute phases"},
2757 2756 {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
2758 2757 "reachableroots"},
2759 2758 {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
2760 2759 "get head revisions"}, /* Can do filtering since 3.2 */
2761 2760 {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
2762 2761 "get filtered head revisions"}, /* Can always do filtering */
2763 2762 {"issnapshot", (PyCFunction)index_issnapshot, METH_O,
2764 2763 "True if the object is a snapshot"},
2765 2764 {"findsnapshots", (PyCFunction)index_findsnapshots, METH_VARARGS,
2766 2765 "Gather snapshot data in a cache dict"},
2767 2766 {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
2768 2767 "determine revisions with deltas to reconstruct fulltext"},
2769 2768 {"slicechunktodensity", (PyCFunction)index_slicechunktodensity,
2770 2769 METH_VARARGS, "slice a chain of revisions into chunks based on read density"},
2771 2770 {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
2772 2771 {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
2773 2772 "match a potentially ambiguous node ID"},
2774 2773 {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
2775 2774 "find length of shortest hex nodeid of a binary ID"},
2776 2775 {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
2777 2776 {NULL} /* Sentinel */
2778 2777 };
2779 2778
2780 2779 static PyGetSetDef index_getset[] = {
2781 2780 {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
2782 2781 {NULL} /* Sentinel */
2783 2782 };
2784 2783
2785 2784 PyTypeObject HgRevlogIndex_Type = {
2786 2785 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2787 2786 "parsers.index", /* tp_name */
2788 2787 sizeof(indexObject), /* tp_basicsize */
2789 2788 0, /* tp_itemsize */
2790 2789 (destructor)index_dealloc, /* tp_dealloc */
2791 2790 0, /* tp_print */
2792 2791 0, /* tp_getattr */
2793 2792 0, /* tp_setattr */
2794 2793 0, /* tp_compare */
2795 2794 0, /* tp_repr */
2796 2795 0, /* tp_as_number */
2797 2796 &index_sequence_methods, /* tp_as_sequence */
2798 2797 &index_mapping_methods, /* tp_as_mapping */
2799 2798 0, /* tp_hash */
2800 2799 0, /* tp_call */
2801 2800 0, /* tp_str */
2802 2801 0, /* tp_getattro */
2803 2802 0, /* tp_setattro */
2804 2803 0, /* tp_as_buffer */
2805 2804 Py_TPFLAGS_DEFAULT, /* tp_flags */
2806 2805 "revlog index", /* tp_doc */
2807 2806 0, /* tp_traverse */
2808 2807 0, /* tp_clear */
2809 2808 0, /* tp_richcompare */
2810 2809 0, /* tp_weaklistoffset */
2811 2810 0, /* tp_iter */
2812 2811 0, /* tp_iternext */
2813 2812 index_methods, /* tp_methods */
2814 2813 0, /* tp_members */
2815 2814 index_getset, /* tp_getset */
2816 2815 0, /* tp_base */
2817 2816 0, /* tp_dict */
2818 2817 0, /* tp_descr_get */
2819 2818 0, /* tp_descr_set */
2820 2819 0, /* tp_dictoffset */
2821 2820 (initproc)index_init, /* tp_init */
2822 2821 0, /* tp_alloc */
2823 2822 };
2824 2823
2825 2824 /*
2826 2825 * returns a tuple of the form (index, cache) with elements as
2827 2826 * follows:
2828 2827 *
2829 2828 * index: an index object that lazily parses RevlogNG records
2830 2829 * cache: if data is inlined, a tuple (0, index_file_content), else None
2831 2830 * index_file_content could be a string, or a buffer
2832 2831 *
2833 2832 * added complications are for backwards compatibility
2834 2833 */
2835 2834 PyObject *parse_index2(PyObject *self, PyObject *args)
2836 2835 {
2837 2836 PyObject *tuple = NULL, *cache = NULL;
2838 2837 indexObject *idx;
2839 2838 int ret;
2840 2839
2841 2840 idx = PyObject_New(indexObject, &HgRevlogIndex_Type);
2842 2841 if (idx == NULL)
2843 2842 goto bail;
2844 2843
2845 2844 ret = index_init(idx, args);
2846 2845 if (ret == -1)
2847 2846 goto bail;
2848 2847
2849 2848 if (idx->inlined) {
2850 2849 cache = Py_BuildValue("iO", 0, idx->data);
2851 2850 if (cache == NULL)
2852 2851 goto bail;
2853 2852 } else {
2854 2853 cache = Py_None;
2855 2854 Py_INCREF(cache);
2856 2855 }
2857 2856
2858 2857 tuple = Py_BuildValue("NN", idx, cache);
2859 2858 if (!tuple)
2860 2859 goto bail;
2861 2860 return tuple;
2862 2861
2863 2862 bail:
2864 2863 Py_XDECREF(idx);
2865 2864 Py_XDECREF(cache);
2866 2865 Py_XDECREF(tuple);
2867 2866 return NULL;
2868 2867 }
2869 2868
2870 2869 #ifdef WITH_RUST
2871 2870
2872 2871 /* rustlazyancestors: iteration over ancestors implemented in Rust
2873 2872 *
2874 2873 * This class holds a reference to an index and to the Rust iterator.
2875 2874 */
2876 2875 typedef struct rustlazyancestorsObjectStruct rustlazyancestorsObject;
2877 2876
2878 2877 struct rustlazyancestorsObjectStruct {
2879 2878 PyObject_HEAD
2880 2879 /* Type-specific fields go here. */
2881 2880 indexObject *index; /* Ref kept to avoid GC'ing the index */
2882 2881 void *iter; /* Rust iterator */
2883 2882 };
2884 2883
2885 2884 /* FFI exposed from Rust code */
2886 2885 rustlazyancestorsObject *rustlazyancestors_init(indexObject *index,
2887 2886 /* intrevs vector */
2888 2887 Py_ssize_t initrevslen,
2889 2888 long *initrevs, long stoprev,
2890 2889 int inclusive);
2891 2890 void rustlazyancestors_drop(rustlazyancestorsObject *self);
2892 2891 int rustlazyancestors_next(rustlazyancestorsObject *self);
2893 2892 int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev);
2894 2893
2895 2894 /* CPython instance methods */
2896 2895 static int rustla_init(rustlazyancestorsObject *self, PyObject *args)
2897 2896 {
2898 2897 PyObject *initrevsarg = NULL;
2899 2898 PyObject *inclusivearg = NULL;
2900 2899 long stoprev = 0;
2901 2900 long *initrevs = NULL;
2902 2901 int inclusive = 0;
2903 2902 Py_ssize_t i;
2904 2903
2905 2904 indexObject *index;
2906 2905 if (!PyArg_ParseTuple(args, "O!O!lO!", &HgRevlogIndex_Type, &index,
2907 2906 &PyList_Type, &initrevsarg, &stoprev,
2908 2907 &PyBool_Type, &inclusivearg))
2909 2908 return -1;
2910 2909
2911 2910 Py_INCREF(index);
2912 2911 self->index = index;
2913 2912
2914 2913 if (inclusivearg == Py_True)
2915 2914 inclusive = 1;
2916 2915
2917 2916 Py_ssize_t linit = PyList_GET_SIZE(initrevsarg);
2918 2917
2919 2918 initrevs = (long *)calloc(linit, sizeof(long));
2920 2919
2921 2920 if (initrevs == NULL) {
2922 2921 PyErr_NoMemory();
2923 2922 goto bail;
2924 2923 }
2925 2924
2926 2925 for (i = 0; i < linit; i++) {
2927 2926 initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i));
2928 2927 }
2929 2928 if (PyErr_Occurred())
2930 2929 goto bail;
2931 2930
2932 2931 self->iter =
2933 2932 rustlazyancestors_init(index, linit, initrevs, stoprev, inclusive);
2934 2933 if (self->iter == NULL) {
2935 2934 /* if this is because of GraphError::ParentOutOfRange
2936 2935 * HgRevlogIndex_GetParents() has already set the proper
2937 2936 * exception */
2938 2937 goto bail;
2939 2938 }
2940 2939
2941 2940 free(initrevs);
2942 2941 return 0;
2943 2942
2944 2943 bail:
2945 2944 free(initrevs);
2946 2945 return -1;
2947 2946 };
2948 2947
2949 2948 static void rustla_dealloc(rustlazyancestorsObject *self)
2950 2949 {
2951 2950 Py_XDECREF(self->index);
2952 2951 if (self->iter != NULL) { /* can happen if rustla_init failed */
2953 2952 rustlazyancestors_drop(self->iter);
2954 2953 }
2955 2954 PyObject_Del(self);
2956 2955 }
2957 2956
2958 2957 static PyObject *rustla_next(rustlazyancestorsObject *self)
2959 2958 {
2960 2959 int res = rustlazyancestors_next(self->iter);
2961 2960 if (res == -1) {
2962 2961 /* Setting an explicit exception seems unnecessary
2963 2962 * as examples from Python source code (Objects/rangeobject.c
2964 2963 * and Modules/_io/stringio.c) seem to demonstrate.
2965 2964 */
2966 2965 return NULL;
2967 2966 }
2968 2967 return PyInt_FromLong(res);
2969 2968 }
2970 2969
2971 2970 static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev)
2972 2971 {
2973 2972 long lrev;
2974 2973 if (!pylong_to_long(rev, &lrev)) {
2975 2974 PyErr_Clear();
2976 2975 return 0;
2977 2976 }
2978 2977 return rustlazyancestors_contains(self->iter, lrev);
2979 2978 }
2980 2979
2981 2980 static PySequenceMethods rustla_sequence_methods = {
2982 2981 0, /* sq_length */
2983 2982 0, /* sq_concat */
2984 2983 0, /* sq_repeat */
2985 2984 0, /* sq_item */
2986 2985 0, /* sq_slice */
2987 2986 0, /* sq_ass_item */
2988 2987 0, /* sq_ass_slice */
2989 2988 (objobjproc)rustla_contains, /* sq_contains */
2990 2989 };
2991 2990
2992 2991 static PyTypeObject rustlazyancestorsType = {
2993 2992 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2994 2993 "parsers.rustlazyancestors", /* tp_name */
2995 2994 sizeof(rustlazyancestorsObject), /* tp_basicsize */
2996 2995 0, /* tp_itemsize */
2997 2996 (destructor)rustla_dealloc, /* tp_dealloc */
2998 2997 0, /* tp_print */
2999 2998 0, /* tp_getattr */
3000 2999 0, /* tp_setattr */
3001 3000 0, /* tp_compare */
3002 3001 0, /* tp_repr */
3003 3002 0, /* tp_as_number */
3004 3003 &rustla_sequence_methods, /* tp_as_sequence */
3005 3004 0, /* tp_as_mapping */
3006 3005 0, /* tp_hash */
3007 3006 0, /* tp_call */
3008 3007 0, /* tp_str */
3009 3008 0, /* tp_getattro */
3010 3009 0, /* tp_setattro */
3011 3010 0, /* tp_as_buffer */
3012 3011 Py_TPFLAGS_DEFAULT, /* tp_flags */
3013 3012 "Iterator over ancestors, implemented in Rust", /* tp_doc */
3014 3013 0, /* tp_traverse */
3015 3014 0, /* tp_clear */
3016 3015 0, /* tp_richcompare */
3017 3016 0, /* tp_weaklistoffset */
3018 3017 0, /* tp_iter */
3019 3018 (iternextfunc)rustla_next, /* tp_iternext */
3020 3019 0, /* tp_methods */
3021 3020 0, /* tp_members */
3022 3021 0, /* tp_getset */
3023 3022 0, /* tp_base */
3024 3023 0, /* tp_dict */
3025 3024 0, /* tp_descr_get */
3026 3025 0, /* tp_descr_set */
3027 3026 0, /* tp_dictoffset */
3028 3027 (initproc)rustla_init, /* tp_init */
3029 3028 0, /* tp_alloc */
3030 3029 };
3031 3030 #endif /* WITH_RUST */
3032 3031
3033 3032 void revlog_module_init(PyObject *mod)
3034 3033 {
3035 3034 PyObject *caps = NULL;
3036 3035 HgRevlogIndex_Type.tp_new = PyType_GenericNew;
3037 3036 if (PyType_Ready(&HgRevlogIndex_Type) < 0)
3038 3037 return;
3039 3038 Py_INCREF(&HgRevlogIndex_Type);
3040 3039 PyModule_AddObject(mod, "index", (PyObject *)&HgRevlogIndex_Type);
3041 3040
3042 3041 nodetreeType.tp_new = PyType_GenericNew;
3043 3042 if (PyType_Ready(&nodetreeType) < 0)
3044 3043 return;
3045 3044 Py_INCREF(&nodetreeType);
3046 3045 PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);
3047 3046
3048 3047 if (!nullentry) {
3049 3048 nullentry =
3050 3049 Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0, -1,
3051 3050 -1, -1, -1, nullid, (Py_ssize_t)20);
3052 3051 }
3053 3052 if (nullentry)
3054 3053 PyObject_GC_UnTrack(nullentry);
3055 3054
3056 3055 caps = PyCapsule_New(HgRevlogIndex_GetParents,
3057 3056 "mercurial.cext.parsers.index_get_parents_CAPI",
3058 3057 NULL);
3059 3058 if (caps != NULL)
3060 3059 PyModule_AddObject(mod, "index_get_parents_CAPI", caps);
3061 3060
3062 3061 #ifdef WITH_RUST
3063 3062 rustlazyancestorsType.tp_new = PyType_GenericNew;
3064 3063 if (PyType_Ready(&rustlazyancestorsType) < 0)
3065 3064 return;
3066 3065 Py_INCREF(&rustlazyancestorsType);
3067 3066 PyModule_AddObject(mod, "rustlazyancestors",
3068 3067 (PyObject *)&rustlazyancestorsType);
3069 3068 #endif
3070 3069 }