##// END OF EJS Templates
cext: make HgRevlogIndex_GetParents private again...
marmoute -
r44957:9db11679 default
parent child Browse files
Show More
@@ -1,3082 +1,3082 b''
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #define PY_SSIZE_T_CLEAN
11 11 #include <Python.h>
12 12 #include <assert.h>
13 13 #include <ctype.h>
14 14 #include <limits.h>
15 15 #include <stddef.h>
16 16 #include <stdlib.h>
17 17 #include <string.h>
18 18
19 19 #include "bitmanipulation.h"
20 20 #include "charencode.h"
21 21 #include "revlog.h"
22 22 #include "util.h"
23 23
24 24 #ifdef IS_PY3K
25 25 /* The mapping of Python types is meant to be temporary to get Python
26 26 * 3 to compile. We should remove this once Python 3 support is fully
27 27 * supported and proper types are used in the extensions themselves. */
28 28 #define PyInt_Check PyLong_Check
29 29 #define PyInt_FromLong PyLong_FromLong
30 30 #define PyInt_FromSsize_t PyLong_FromSsize_t
31 31 #define PyInt_AsLong PyLong_AsLong
32 32 #endif
33 33
typedef struct indexObjectStruct indexObject;

/* One node of the base-16 trie: 16 child slots, one per hex nybble of
 * the node hash being resolved. */
typedef struct {
	int children[16];
} nodetreenode;

/*
 * C-level API handed to other extension modules (via a capsule).
 * abi_version must change whenever this struct's layout or the
 * semantics of its function pointers change.
 */
typedef struct {
	int abi_version;
	int (*index_parents)(PyObject *, int, int *);
} Revlog_CAPI;
44 44
/*
 * A base-16 trie for fast node->rev mapping.
 *
 * Positive value is index of the next node in the trie
 * Negative value is a leaf: -(rev + 2)
 * Zero is empty
 */
typedef struct {
	indexObject *index;   /* back-reference to the owning index */
	nodetreenode *nodes;  /* flat array of trie nodes */
	unsigned length;      /* # nodes in use */
	unsigned capacity;    /* # nodes allocated */
	int depth;            /* maximum depth of tree */
	int splits;           /* # splits performed */
} nodetree;

/* Python object wrapper exposing a standalone nodetree. */
typedef struct {
	PyObject_HEAD /* ; */
	    nodetree nt;
} nodetreeObject;
65 65
/*
 * This class has two behaviors.
 *
 * When used in a list-like way (with integer keys), we decode an
 * entry in a RevlogNG index file on demand. We have limited support for
 * integer-keyed insert and delete, only at elements right before the
 * end.
 *
 * With string keys, we lazily perform a reverse mapping from node to
 * rev, using a base-16 trie.
 */
struct indexObjectStruct {
	PyObject_HEAD
	    /* Type-specific fields go here. */
	    PyObject *data;     /* raw bytes of index */
	Py_buffer buf;          /* buffer of data */
	PyObject **cache;       /* cached tuples, one slot per on-disk rev */
	const char **offsets;   /* populated on demand (inline indexes only) */
	Py_ssize_t raw_length;  /* original number of elements */
	Py_ssize_t length;      /* current number of elements */
	PyObject *added;        /* populated on demand: revs appended in memory */
	PyObject *headrevs;     /* cache, invalidated on changes */
	PyObject *filteredrevs; /* filtered revs set */
	nodetree nt;            /* base-16 trie */
	int ntinitialized;      /* 0 or 1 */
	int ntrev;              /* last rev scanned */
	int ntlookups;          /* # lookups */
	int ntmisses;           /* # lookups that miss the cache */
	int inlined;            /* 1 if data is interleaved with the index */
};
96 96
97 97 static Py_ssize_t index_length(const indexObject *self)
98 98 {
99 99 if (self->added == NULL)
100 100 return self->length;
101 101 return self->length + PyList_GET_SIZE(self->added);
102 102 }
103 103
/* Shared singleton tuple returned for the null revision. */
static PyObject *nullentry = NULL;
/* 20 zero bytes: the node id of the null revision. */
static const char nullid[20] = {0};
static const Py_ssize_t nullrev = -1;

static Py_ssize_t inline_scan(indexObject *self, const char **offsets);

/* Py_BuildValue format for an index entry tuple; 'K'/'k' picked so the
 * 64-bit offset+flags field fits a platform unsigned long (long). */
#if LONG_MAX == 0x7fffffffL
static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
#else
static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
#endif

/* A RevlogNG v1 index entry is 64 bytes long. */
static const long v1_hdrsize = 64;
118 118
119 119 static void raise_revlog_error(void)
120 120 {
121 121 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
122 122
123 123 mod = PyImport_ImportModule("mercurial.error");
124 124 if (mod == NULL) {
125 125 goto cleanup;
126 126 }
127 127
128 128 dict = PyModule_GetDict(mod);
129 129 if (dict == NULL) {
130 130 goto cleanup;
131 131 }
132 132 Py_INCREF(dict);
133 133
134 134 errclass = PyDict_GetItemString(dict, "RevlogError");
135 135 if (errclass == NULL) {
136 136 PyErr_SetString(PyExc_SystemError,
137 137 "could not find RevlogError");
138 138 goto cleanup;
139 139 }
140 140
141 141 /* value of exception is ignored by callers */
142 142 PyErr_SetString(errclass, "RevlogError");
143 143
144 144 cleanup:
145 145 Py_XDECREF(dict);
146 146 Py_XDECREF(mod);
147 147 }
148 148
/*
 * Return a pointer to the beginning of a RevlogNG record.
 *
 * For an inline index, records are not evenly spaced (revision data is
 * interleaved), so an offset table is built lazily by inline_scan() and
 * cached in self->offsets. Otherwise every record is v1_hdrsize bytes.
 * Returns NULL with MemoryError set if the offset table cannot be
 * allocated.
 */
static const char *index_deref(indexObject *self, Py_ssize_t pos)
{
	if (self->inlined && pos > 0) {
		if (self->offsets == NULL) {
			/* first access: build the per-rev offset table */
			self->offsets = PyMem_Malloc(self->raw_length *
			                             sizeof(*self->offsets));
			if (self->offsets == NULL)
				return (const char *)PyErr_NoMemory();
			inline_scan(self, self->offsets);
		}
		return self->offsets[pos];
	}

	return (const char *)(self->buf.buf) + pos * v1_hdrsize;
}
167 167
/*
 * Get parents of the given rev.
 *
 * The specified rev must be valid and must not be nullrev. A returned
 * parent revision may be nullrev, but is guaranteed to be in valid range.
 * maxrev is the largest revision number acceptable as a parent.
 *
 * Returns 0 on success, -1 on failure (with a Python exception set).
 */
static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
                                    int maxrev)
{
	if (rev >= self->length) {
		/* in-memory entry: parents are tuple items 5 and 6 */
		long tmp;
		PyObject *tuple =
		    PyList_GET_ITEM(self->added, rev - self->length);
		if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 5), &tmp)) {
			return -1;
		}
		ps[0] = (int)tmp;
		if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 6), &tmp)) {
			return -1;
		}
		ps[1] = (int)tmp;
	} else {
		/* on-disk entry: big-endian 32-bit parents at offsets 24/28 */
		const char *data = index_deref(self, rev);
		ps[0] = getbe32(data + 24);
		ps[1] = getbe32(data + 28);
	}
	/* If index file is corrupted, ps[] may point to invalid revisions. So
	 * there is a risk of buffer overflow to trust them unconditionally. */
	if (ps[0] < -1 || ps[0] > maxrev || ps[1] < -1 || ps[1] > maxrev) {
		PyErr_SetString(PyExc_ValueError, "parent out of range");
		return -1;
	}
	return 0;
}
202 202
203 203 /*
204 204 * Get parents of the given rev.
205 205 *
206 206 * If the specified rev is out of range, IndexError will be raised. If the
207 207 * revlog entry is corrupted, ValueError may be raised.
208 208 *
209 209 * Returns 0 on success or -1 on failure.
210 210 */
211 int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps)
211 static int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps)
212 212 {
213 213 int tiprev;
214 214 if (!op || !HgRevlogIndex_Check(op) || !ps) {
215 215 PyErr_BadInternalCall();
216 216 return -1;
217 217 }
218 218 tiprev = (int)index_length((indexObject *)op) - 1;
219 219 if (rev < -1 || rev > tiprev) {
220 220 PyErr_Format(PyExc_IndexError, "rev out of range: %d", rev);
221 221 return -1;
222 222 } else if (rev == -1) {
223 223 ps[0] = ps[1] = -1;
224 224 return 0;
225 225 } else {
226 226 return index_get_parents((indexObject *)op, rev, ps, tiprev);
227 227 }
228 228 }
229 229
/*
 * Return the byte offset in the data file at which the given rev's
 * chunk starts, or -1 on error (with an exception set).
 *
 * On-disk entries store offset<<16 | flags in a 48+16 bit field; the
 * low 16 flag bits are shifted away before returning.
 */
static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
{
	uint64_t offset;
	if (rev == nullrev) {
		return 0;
	}
	if (rev >= self->length) {
		/* in-memory entry: offset+flags is tuple item 0 */
		PyObject *tuple;
		PyObject *pylong;
		PY_LONG_LONG tmp;
		tuple = PyList_GET_ITEM(self->added, rev - self->length);
		pylong = PyTuple_GET_ITEM(tuple, 0);
		tmp = PyLong_AsLongLong(pylong);
		if (tmp == -1 && PyErr_Occurred()) {
			return -1;
		}
		if (tmp < 0) {
			PyErr_Format(PyExc_OverflowError,
			             "revlog entry size out of bound (%lld)",
			             (long long)tmp);
			return -1;
		}
		offset = (uint64_t)tmp;
	} else {
		const char *data = index_deref(self, rev);
		offset = getbe32(data + 4);
		if (rev == 0) {
			/* mask out version number for the first entry */
			offset &= 0xFFFF;
		} else {
			/* high 32 bits live in the first word of the record */
			uint32_t offset_high = getbe32(data);
			offset |= ((uint64_t)offset_high) << 32;
		}
	}
	/* drop the 16 flag bits to obtain the byte offset */
	return (int64_t)(offset >> 16);
}
266 266
/*
 * Return the compressed length of the given rev's chunk, or -1 on
 * error (with OverflowError or another exception set).
 */
static inline int index_get_length(indexObject *self, Py_ssize_t rev)
{
	if (rev == nullrev) {
		return 0;
	}
	if (rev >= self->length) {
		/* in-memory entry: compressed length is tuple item 1 */
		PyObject *tuple;
		PyObject *pylong;
		long ret;
		tuple = PyList_GET_ITEM(self->added, rev - self->length);
		pylong = PyTuple_GET_ITEM(tuple, 1);
		ret = PyInt_AsLong(pylong);
		if (ret == -1 && PyErr_Occurred()) {
			return -1;
		}
		if (ret < 0 || ret > (long)INT_MAX) {
			PyErr_Format(PyExc_OverflowError,
			             "revlog entry size out of bound (%ld)",
			             ret);
			return -1;
		}
		return (int)ret;
	} else {
		/* on-disk entry: big-endian 32-bit length at offset 8 */
		const char *data = index_deref(self, rev);
		int tmp = (int)getbe32(data + 8);
		if (tmp < 0) {
			PyErr_Format(PyExc_OverflowError,
			             "revlog entry size out of bound (%d)",
			             tmp);
			return -1;
		}
		return tmp;
	}
}
301 301
/*
 * RevlogNG format (all in big endian, data may be inlined):
 *    6 bytes: offset
 *    2 bytes: flags
 *    4 bytes: compressed length
 *    4 bytes: uncompressed length
 *    4 bytes: base revision
 *    4 bytes: link revision
 *    4 bytes: parent 1 revision
 *    4 bytes: parent 2 revision
 *   32 bytes: nodeid (only 20 bytes used)
 *
 * index_get decodes entry `pos` into an 8-tuple (new reference), using
 * and populating self->cache for on-disk entries. Returns NULL with an
 * exception set on error.
 */
static PyObject *index_get(indexObject *self, Py_ssize_t pos)
{
	uint64_t offset_flags;
	int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
	const char *c_node_id;
	const char *data;
	Py_ssize_t length = index_length(self);
	PyObject *entry;

	if (pos == nullrev) {
		/* shared singleton for the null revision */
		Py_INCREF(nullentry);
		return nullentry;
	}

	if (pos < 0 || pos >= length) {
		PyErr_SetString(PyExc_IndexError, "revlog index out of range");
		return NULL;
	}

	if (pos >= self->length) {
		/* in-memory entries are already tuples; hand one out */
		PyObject *obj;
		obj = PyList_GET_ITEM(self->added, pos - self->length);
		Py_INCREF(obj);
		return obj;
	}

	if (self->cache) {
		if (self->cache[pos]) {
			Py_INCREF(self->cache[pos]);
			return self->cache[pos];
		}
	} else {
		/* lazily allocate one cache slot per on-disk revision */
		self->cache = calloc(self->raw_length, sizeof(PyObject *));
		if (self->cache == NULL)
			return PyErr_NoMemory();
	}

	data = index_deref(self, pos);
	if (data == NULL)
		return NULL;

	offset_flags = getbe32(data + 4);
	if (pos == 0) /* mask out version number for the first entry */
		offset_flags &= 0xFFFF;
	else {
		uint32_t offset_high = getbe32(data);
		offset_flags |= ((uint64_t)offset_high) << 32;
	}

	comp_len = getbe32(data + 8);
	uncomp_len = getbe32(data + 12);
	base_rev = getbe32(data + 16);
	link_rev = getbe32(data + 20);
	parent_1 = getbe32(data + 24);
	parent_2 = getbe32(data + 28);
	c_node_id = data + 32;

	entry = Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
	                      base_rev, link_rev, parent_1, parent_2, c_node_id,
	                      (Py_ssize_t)20);

	if (entry) {
		/* cached tuples are immortal while cached; keep them out of
		 * the cyclic GC and hold an extra reference for the cache */
		PyObject_GC_UnTrack(entry);
		Py_INCREF(entry);
	}

	self->cache[pos] = entry;

	return entry;
}
384 384
/*
 * Return the 20-byte SHA of the node corresponding to the given rev.
 * Returns nullid for nullrev and NULL (no exception set) when pos is
 * out of range or the entry cannot be read.
 */
static const char *index_node(indexObject *self, Py_ssize_t pos)
{
	Py_ssize_t length = index_length(self);
	const char *data;

	if (pos == nullrev)
		return nullid;

	if (pos >= length)
		return NULL;

	if (pos >= self->length) {
		/* in-memory entry: node is the bytes object at tuple item 7 */
		PyObject *tuple, *str;
		tuple = PyList_GET_ITEM(self->added, pos - self->length);
		str = PyTuple_GetItem(tuple, 7);
		return str ? PyBytes_AS_STRING(str) : NULL;
	}

	/* on-disk entry: node starts at byte 32 of the record */
	data = index_deref(self, pos);
	return data ? data + 32 : NULL;
}
409 409
410 410 /*
411 411 * Return the 20-byte SHA of the node corresponding to the given rev. The
412 412 * rev is assumed to be existing. If not, an exception is set.
413 413 */
414 414 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
415 415 {
416 416 const char *node = index_node(self, pos);
417 417 if (node == NULL) {
418 418 PyErr_Format(PyExc_IndexError, "could not access rev %d",
419 419 (int)pos);
420 420 }
421 421 return node;
422 422 }
423 423
424 424 static int nt_insert(nodetree *self, const char *node, int rev);
425 425
426 426 static int node_check(PyObject *obj, char **node)
427 427 {
428 428 Py_ssize_t nodelen;
429 429 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
430 430 return -1;
431 431 if (nodelen == 20)
432 432 return 0;
433 433 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
434 434 return -1;
435 435 }
436 436
/*
 * Append an 8-tuple index entry to the in-memory portion of the index.
 *
 * Keeps the node trie (if built) and invalidates the cached head list.
 * Returns None on success, NULL with an exception set on failure.
 */
static PyObject *index_append(indexObject *self, PyObject *obj)
{
	char *node;
	Py_ssize_t len;

	if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
		PyErr_SetString(PyExc_TypeError, "8-tuple required");
		return NULL;
	}

	/* tuple item 7 is the 20-byte node id */
	if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
		return NULL;

	/* current length == rev number of the entry being appended */
	len = index_length(self);

	if (self->added == NULL) {
		self->added = PyList_New(0);
		if (self->added == NULL)
			return NULL;
	}

	if (PyList_Append(self->added, obj) == -1)
		return NULL;

	/* keep the node->rev trie in sync if it has been built */
	if (self->ntinitialized)
		nt_insert(&self->nt, node, (int)len);

	/* head set changed; drop the cache */
	Py_CLEAR(self->headrevs);
	Py_RETURN_NONE;
}
467 467
/*
 * Build a dict of internal statistics (counts, trie shape) for
 * debugging/introspection. Returns NULL with an exception set on error.
 */
static PyObject *index_stats(indexObject *self)
{
	PyObject *obj = PyDict_New();
	PyObject *s = NULL;
	PyObject *t = NULL;

	if (obj == NULL)
		return NULL;

/* istat(field, label): store self->field under `label` in obj,
 * jumping to bail on any allocation/insertion failure. */
#define istat(__n, __d)                                                        \
	do {                                                                   \
		s = PyBytes_FromString(__d);                                   \
		t = PyInt_FromSsize_t(self->__n);                              \
		if (!s || !t)                                                  \
			goto bail;                                             \
		if (PyDict_SetItem(obj, s, t) == -1)                           \
			goto bail;                                             \
		Py_CLEAR(s);                                                   \
		Py_CLEAR(t);                                                   \
	} while (0)

	if (self->added) {
		Py_ssize_t len = PyList_GET_SIZE(self->added);
		s = PyBytes_FromString("index entries added");
		t = PyInt_FromSsize_t(len);
		if (!s || !t)
			goto bail;
		if (PyDict_SetItem(obj, s, t) == -1)
			goto bail;
		Py_CLEAR(s);
		Py_CLEAR(t);
	}

	/* only report on-disk size separately when it differs */
	if (self->raw_length != self->length)
		istat(raw_length, "revs on disk");
	istat(length, "revs in memory");
	istat(ntlookups, "node trie lookups");
	istat(ntmisses, "node trie misses");
	istat(ntrev, "node trie last rev scanned");
	if (self->ntinitialized) {
		istat(nt.capacity, "node trie capacity");
		istat(nt.depth, "node trie depth");
		istat(nt.length, "node trie count");
		istat(nt.splits, "node trie splits");
	}

#undef istat

	return obj;

bail:
	Py_XDECREF(obj);
	Py_XDECREF(s);
	Py_XDECREF(t);
	return NULL;
}
524 524
525 525 /*
526 526 * When we cache a list, we want to be sure the caller can't mutate
527 527 * the cached copy.
528 528 */
529 529 static PyObject *list_copy(PyObject *list)
530 530 {
531 531 Py_ssize_t len = PyList_GET_SIZE(list);
532 532 PyObject *newlist = PyList_New(len);
533 533 Py_ssize_t i;
534 534
535 535 if (newlist == NULL)
536 536 return NULL;
537 537
538 538 for (i = 0; i < len; i++) {
539 539 PyObject *obj = PyList_GET_ITEM(list, i);
540 540 Py_INCREF(obj);
541 541 PyList_SET_ITEM(newlist, i, obj);
542 542 }
543 543
544 544 return newlist;
545 545 }
546 546
547 547 static int check_filter(PyObject *filter, Py_ssize_t arg)
548 548 {
549 549 if (filter) {
550 550 PyObject *arglist, *result;
551 551 int isfiltered;
552 552
553 553 arglist = Py_BuildValue("(n)", arg);
554 554 if (!arglist) {
555 555 return -1;
556 556 }
557 557
558 558 result = PyEval_CallObject(filter, arglist);
559 559 Py_DECREF(arglist);
560 560 if (!result) {
561 561 return -1;
562 562 }
563 563
564 564 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
565 565 * same as this function, so we can just return it directly.*/
566 566 isfiltered = PyObject_IsTrue(result);
567 567 Py_DECREF(result);
568 568 return isfiltered;
569 569 } else {
570 570 return 0;
571 571 }
572 572 }
573 573
574 574 static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
575 575 Py_ssize_t marker, char *phases)
576 576 {
577 577 PyObject *iter = NULL;
578 578 PyObject *iter_item = NULL;
579 579 Py_ssize_t min_idx = index_length(self) + 2;
580 580 long iter_item_long;
581 581
582 582 if (PyList_GET_SIZE(list) != 0) {
583 583 iter = PyObject_GetIter(list);
584 584 if (iter == NULL)
585 585 return -2;
586 586 while ((iter_item = PyIter_Next(iter))) {
587 587 if (!pylong_to_long(iter_item, &iter_item_long)) {
588 588 Py_DECREF(iter_item);
589 589 return -2;
590 590 }
591 591 Py_DECREF(iter_item);
592 592 if (iter_item_long < min_idx)
593 593 min_idx = iter_item_long;
594 594 phases[iter_item_long] = (char)marker;
595 595 }
596 596 Py_DECREF(iter);
597 597 }
598 598
599 599 return min_idx;
600 600 }
601 601
602 602 static inline void set_phase_from_parents(char *phases, int parent_1,
603 603 int parent_2, Py_ssize_t i)
604 604 {
605 605 if (parent_1 >= 0 && phases[parent_1] > phases[i])
606 606 phases[i] = phases[parent_1];
607 607 if (parent_2 >= 0 && phases[parent_2] > phases[i])
608 608 phases[i] = phases[parent_2];
609 609 }
610 610
/*
 * reachableroots2(minroot, heads, roots, includepath) -> list of revs.
 *
 * Walk ancestors of `heads` (bounded below by `minroot`) and collect the
 * members of `roots` that are reachable. When includepath is True, also
 * include every rev lying on a path between a reachable root and a head.
 * Returns NULL with an exception set on error.
 */
static PyObject *reachableroots2(indexObject *self, PyObject *args)
{

	/* Input */
	long minroot;
	PyObject *includepatharg = NULL;
	int includepath = 0;
	/* heads and roots are lists */
	PyObject *heads = NULL;
	PyObject *roots = NULL;
	PyObject *reachable = NULL;

	PyObject *val;
	Py_ssize_t len = index_length(self);
	long revnum;
	Py_ssize_t k;
	Py_ssize_t i;
	Py_ssize_t l;
	int r;
	int parents[2];

	/* Internal data structure:
	 * tovisit: array of length len+1 (all revs + nullrev), filled upto
	 * lentovisit
	 *
	 * revstates: array of length len+1 (all revs + nullrev) */
	int *tovisit = NULL;
	long lentovisit = 0;
	enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
	char *revstates = NULL;

	/* Get arguments */
	if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
	                      &PyList_Type, &roots, &PyBool_Type,
	                      &includepatharg))
		goto bail;

	if (includepatharg == Py_True)
		includepath = 1;

	/* Initialize return set */
	reachable = PyList_New(0);
	if (reachable == NULL)
		goto bail;

	/* Initialize internal datastructures */
	tovisit = (int *)malloc((len + 1) * sizeof(int));
	if (tovisit == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	revstates = (char *)calloc(len + 1, 1);
	if (revstates == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* revstates is indexed by rev+1 so nullrev (-1) maps to slot 0 */
	l = PyList_GET_SIZE(roots);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		/* If root is out of range, e.g. wdir(), it must be unreachable
		 * from heads. So we can just ignore it. */
		if (revnum + 1 < 0 || revnum + 1 >= len + 1)
			continue;
		revstates[revnum + 1] |= RS_ROOT;
	}

	/* Populate tovisit with all the heads */
	l = PyList_GET_SIZE(heads);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
			PyErr_SetString(PyExc_IndexError, "head out of range");
			goto bail;
		}
		if (!(revstates[revnum + 1] & RS_SEEN)) {
			tovisit[lentovisit++] = (int)revnum;
			revstates[revnum + 1] |= RS_SEEN;
		}
	}

	/* Visit the tovisit list and find the reachable roots */
	k = 0;
	while (k < lentovisit) {
		/* Add the node to reachable if it is a root*/
		revnum = tovisit[k++];
		if (revstates[revnum + 1] & RS_ROOT) {
			revstates[revnum + 1] |= RS_REACHABLE;
			val = PyInt_FromLong(revnum);
			if (val == NULL)
				goto bail;
			r = PyList_Append(reachable, val);
			Py_DECREF(val);
			if (r < 0)
				goto bail;
			/* without includepath we can stop at a root */
			if (includepath == 0)
				continue;
		}

		/* Add its parents to the list of nodes to visit */
		if (revnum == nullrev)
			continue;
		r = index_get_parents(self, revnum, parents, (int)len - 1);
		if (r < 0)
			goto bail;
		for (i = 0; i < 2; i++) {
			if (!(revstates[parents[i] + 1] & RS_SEEN) &&
			    parents[i] >= minroot) {
				tovisit[lentovisit++] = parents[i];
				revstates[parents[i] + 1] |= RS_SEEN;
			}
		}
	}

	/* Find all the nodes in between the roots we found and the heads
	 * and add them to the reachable set */
	if (includepath == 1) {
		long minidx = minroot;
		if (minidx < 0)
			minidx = 0;
		/* forward sweep: a rev is on a path if either parent is
		 * already marked reachable */
		for (i = minidx; i < len; i++) {
			if (!(revstates[i + 1] & RS_SEEN))
				continue;
			r = index_get_parents(self, i, parents, (int)len - 1);
			/* Corrupted index file, error is set from
			 * index_get_parents */
			if (r < 0)
				goto bail;
			if (((revstates[parents[0] + 1] |
			      revstates[parents[1] + 1]) &
			     RS_REACHABLE) &&
			    !(revstates[i + 1] & RS_REACHABLE)) {
				revstates[i + 1] |= RS_REACHABLE;
				val = PyInt_FromSsize_t(i);
				if (val == NULL)
					goto bail;
				r = PyList_Append(reachable, val);
				Py_DECREF(val);
				if (r < 0)
					goto bail;
			}
		}
	}

	free(revstates);
	free(tovisit);
	return reachable;
bail:
	Py_XDECREF(reachable);
	free(revstates);
	free(tovisit);
	return NULL;
}
769 769
/*
 * computephases(roots) -> (len, [None, set(draft revs), set(secret revs), ...])
 *
 * roots is a list of lists of root revs, one list per non-public phase.
 * Phases are propagated from the roots to all descendants; the returned
 * sets contain the revs of each non-public phase (public is implicit).
 * Returns NULL with an exception set on error.
 */
static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
{
	PyObject *roots = Py_None;
	PyObject *ret = NULL;
	PyObject *phasessize = NULL;
	PyObject *phaseroots = NULL;
	PyObject *phaseset = NULL;
	PyObject *phasessetlist = NULL;
	PyObject *rev = NULL;
	Py_ssize_t len = index_length(self);
	Py_ssize_t numphase = 0;
	Py_ssize_t minrevallphases = 0;
	Py_ssize_t minrevphase = 0;
	Py_ssize_t i = 0;
	char *phases = NULL;
	long phase;

	if (!PyArg_ParseTuple(args, "O", &roots))
		goto done;
	if (roots == NULL || !PyList_Check(roots)) {
		PyErr_SetString(PyExc_TypeError, "roots must be a list");
		goto done;
	}

	phases = calloc(
	    len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
	if (phases == NULL) {
		PyErr_NoMemory();
		goto done;
	}
	/* Put the phase information of all the roots in phases */
	numphase = PyList_GET_SIZE(roots) + 1;
	minrevallphases = len + 1;
	phasessetlist = PyList_New(numphase);
	if (phasessetlist == NULL)
		goto done;

	/* slot 0 stands for the public phase, which gets no set */
	PyList_SET_ITEM(phasessetlist, 0, Py_None);
	Py_INCREF(Py_None);

	for (i = 0; i < numphase - 1; i++) {
		phaseroots = PyList_GET_ITEM(roots, i);
		phaseset = PySet_New(NULL);
		if (phaseset == NULL)
			goto release;
		PyList_SET_ITEM(phasessetlist, i + 1, phaseset);
		if (!PyList_Check(phaseroots)) {
			PyErr_SetString(PyExc_TypeError,
			                "roots item must be a list");
			goto release;
		}
		minrevphase =
		    add_roots_get_min(self, phaseroots, i + 1, phases);
		if (minrevphase == -2) /* Error from add_roots_get_min */
			goto release;
		minrevallphases = MIN(minrevallphases, minrevphase);
	}
	/* Propagate the phase information from the roots to the revs */
	if (minrevallphases != -1) {
		int parents[2];
		for (i = minrevallphases; i < len; i++) {
			if (index_get_parents(self, i, parents, (int)len - 1) <
			    0)
				goto release;
			set_phase_from_parents(phases, parents[0], parents[1],
			                       i);
		}
	}
	/* Transform phase list to a python list */
	phasessize = PyInt_FromSsize_t(len);
	if (phasessize == NULL)
		goto release;
	for (i = 0; i < len; i++) {
		phase = phases[i];
		/* We only store the sets of phase for non public phase, the
		 * public phase is computed as a difference */
		if (phase != 0) {
			phaseset = PyList_GET_ITEM(phasessetlist, phase);
			rev = PyInt_FromSsize_t(i);
			if (rev == NULL)
				goto release;
			PySet_Add(phaseset, rev);
			Py_XDECREF(rev);
		}
	}
	ret = PyTuple_Pack(2, phasessize, phasessetlist);

release:
	Py_XDECREF(phasessize);
	Py_XDECREF(phasessetlist);
done:
	free(phases);
	return ret;
}
864 864
/*
 * headrevs([filteredrevs]) -> list of head revisions.
 *
 * A head is a rev with no unfiltered children. The result is cached on
 * self->headrevs (keyed on the filteredrevs object identity) and a copy
 * is returned so callers cannot mutate the cache. Returns NULL with an
 * exception set on error.
 */
static PyObject *index_headrevs(indexObject *self, PyObject *args)
{
	Py_ssize_t i, j, len;
	char *nothead = NULL;
	PyObject *heads = NULL;
	PyObject *filter = NULL;
	PyObject *filteredrevs = Py_None;

	if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
		return NULL;
	}

	/* cache hit: same filter object as last time */
	if (self->headrevs && filteredrevs == self->filteredrevs)
		return list_copy(self->headrevs);

	Py_DECREF(self->filteredrevs);
	self->filteredrevs = filteredrevs;
	Py_INCREF(filteredrevs);

	if (filteredrevs != Py_None) {
		filter = PyObject_GetAttrString(filteredrevs, "__contains__");
		if (!filter) {
			PyErr_SetString(
			    PyExc_TypeError,
			    "filteredrevs has no attribute __contains__");
			goto bail;
		}
	}

	len = index_length(self);
	heads = PyList_New(0);
	if (heads == NULL)
		goto bail;
	if (len == 0) {
		/* empty repo: nullrev is the only head */
		PyObject *nullid = PyInt_FromLong(-1);
		if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
			Py_XDECREF(nullid);
			goto bail;
		}
		goto done;
	}

	nothead = calloc(len, 1);
	if (nothead == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* walk from tip down, marking parents of unfiltered revs */
	for (i = len - 1; i >= 0; i--) {
		int isfiltered;
		int parents[2];

		/* If nothead[i] == 1, it means we've seen an unfiltered child
		 * of this node already, and therefore this node is not
		 * filtered. So we can skip the expensive check_filter step.
		 */
		if (nothead[i] != 1) {
			isfiltered = check_filter(filter, i);
			if (isfiltered == -1) {
				PyErr_SetString(PyExc_TypeError,
				                "unable to check filter");
				goto bail;
			}

			if (isfiltered) {
				nothead[i] = 1;
				continue;
			}
		}

		if (index_get_parents(self, i, parents, (int)len - 1) < 0)
			goto bail;
		for (j = 0; j < 2; j++) {
			if (parents[j] >= 0)
				nothead[parents[j]] = 1;
		}
	}

	/* whatever was never marked as a parent is a head */
	for (i = 0; i < len; i++) {
		PyObject *head;

		if (nothead[i])
			continue;
		head = PyInt_FromSsize_t(i);
		if (head == NULL || PyList_Append(heads, head) == -1) {
			Py_XDECREF(head);
			goto bail;
		}
	}

done:
	self->headrevs = heads;
	Py_XDECREF(filter);
	free(nothead);
	return list_copy(self->headrevs);
bail:
	Py_XDECREF(filter);
	Py_XDECREF(heads);
	free(nothead);
	return NULL;
}
966 966
/**
 * Obtain the base revision index entry.
 *
 * Callers must ensure that rev >= 0 or illegal memory access may occur.
 * Returns the base revision (possibly -1) or -2 on error, with a
 * ValueError set for corrupted data.
 */
static inline int index_baserev(indexObject *self, int rev)
{
	const char *data;
	int result;

	if (rev >= self->length) {
		/* in-memory entry: base rev is tuple item 3 */
		PyObject *tuple =
		    PyList_GET_ITEM(self->added, rev - self->length);
		long ret;
		if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 3), &ret)) {
			return -2;
		}
		result = (int)ret;
	} else {
		/* on-disk entry: big-endian 32-bit base rev at offset 16 */
		data = index_deref(self, rev);
		if (data == NULL) {
			return -2;
		}

		result = getbe32(data + 16);
	}
	/* sanity-check against corrupted index data */
	if (result > rev) {
		PyErr_Format(
		    PyExc_ValueError,
		    "corrupted revlog, revision base above revision: %d, %d",
		    rev, result);
		return -2;
	}
	if (result < -1) {
		PyErr_Format(
		    PyExc_ValueError,
		    "corrupted revlog, revision base out of range: %d, %d", rev,
		    result);
		return -2;
	}
	return result;
}
1009 1009
/**
 * Find if a revision is a snapshot or not
 *
 * Only relevant for sparse-revlog case.
 * Callers must ensure that rev is in a valid range.
 *
 * Returns 1 if rev is a snapshot, 0 if not, -1 on error (exception set).
 * Walks the delta-base chain: a rev is a snapshot when no base on the
 * chain is one of its parents and the chain ends at a full snapshot.
 */
static int index_issnapshotrev(indexObject *self, Py_ssize_t rev)
{
	int ps[2];
	Py_ssize_t base;
	while (rev >= 0) {
		base = (Py_ssize_t)index_baserev(self, rev);
		if (base == rev) {
			/* self-based entries mean "no base" */
			base = -1;
		}
		if (base == -2) {
			assert(PyErr_Occurred());
			return -1;
		}
		if (base == -1) {
			/* chain ended at a full snapshot */
			return 1;
		}
		if (index_get_parents(self, rev, ps, (int)rev) < 0) {
			assert(PyErr_Occurred());
			return -1;
		};
		if (base == ps[0] || base == ps[1]) {
			/* delta against a parent: a plain delta, not a
			 * snapshot */
			return 0;
		}
		rev = base;
	}
	return rev == -1;
}
1043 1043
1044 1044 static PyObject *index_issnapshot(indexObject *self, PyObject *value)
1045 1045 {
1046 1046 long rev;
1047 1047 int issnap;
1048 1048 Py_ssize_t length = index_length(self);
1049 1049
1050 1050 if (!pylong_to_long(value, &rev)) {
1051 1051 return NULL;
1052 1052 }
1053 1053 if (rev < -1 || rev >= length) {
1054 1054 PyErr_Format(PyExc_ValueError, "revlog index out of range: %ld",
1055 1055 rev);
1056 1056 return NULL;
1057 1057 };
1058 1058 issnap = index_issnapshotrev(self, (Py_ssize_t)rev);
1059 1059 if (issnap < 0) {
1060 1060 return NULL;
1061 1061 };
1062 1062 return PyBool_FromLong((long)issnap);
1063 1063 }
1064 1064
1065 1065 static PyObject *index_findsnapshots(indexObject *self, PyObject *args)
1066 1066 {
1067 1067 Py_ssize_t start_rev;
1068 1068 PyObject *cache;
1069 1069 Py_ssize_t base;
1070 1070 Py_ssize_t rev;
1071 1071 PyObject *key = NULL;
1072 1072 PyObject *value = NULL;
1073 1073 const Py_ssize_t length = index_length(self);
1074 1074 if (!PyArg_ParseTuple(args, "O!n", &PyDict_Type, &cache, &start_rev)) {
1075 1075 return NULL;
1076 1076 }
1077 1077 for (rev = start_rev; rev < length; rev++) {
1078 1078 int issnap;
1079 1079 PyObject *allvalues = NULL;
1080 1080 issnap = index_issnapshotrev(self, rev);
1081 1081 if (issnap < 0) {
1082 1082 goto bail;
1083 1083 }
1084 1084 if (issnap == 0) {
1085 1085 continue;
1086 1086 }
1087 1087 base = (Py_ssize_t)index_baserev(self, rev);
1088 1088 if (base == rev) {
1089 1089 base = -1;
1090 1090 }
1091 1091 if (base == -2) {
1092 1092 assert(PyErr_Occurred());
1093 1093 goto bail;
1094 1094 }
1095 1095 key = PyInt_FromSsize_t(base);
1096 1096 allvalues = PyDict_GetItem(cache, key);
1097 1097 if (allvalues == NULL && PyErr_Occurred()) {
1098 1098 goto bail;
1099 1099 }
1100 1100 if (allvalues == NULL) {
1101 1101 int r;
1102 1102 allvalues = PyList_New(0);
1103 1103 if (!allvalues) {
1104 1104 goto bail;
1105 1105 }
1106 1106 r = PyDict_SetItem(cache, key, allvalues);
1107 1107 Py_DECREF(allvalues);
1108 1108 if (r < 0) {
1109 1109 goto bail;
1110 1110 }
1111 1111 }
1112 1112 value = PyInt_FromSsize_t(rev);
1113 1113 if (PyList_Append(allvalues, value)) {
1114 1114 goto bail;
1115 1115 }
1116 1116 Py_CLEAR(key);
1117 1117 Py_CLEAR(value);
1118 1118 }
1119 1119 Py_RETURN_NONE;
1120 1120 bail:
1121 1121 Py_XDECREF(key);
1122 1122 Py_XDECREF(value);
1123 1123 return NULL;
1124 1124 }
1125 1125
1126 1126 static PyObject *index_deltachain(indexObject *self, PyObject *args)
1127 1127 {
1128 1128 int rev, generaldelta;
1129 1129 PyObject *stoparg;
1130 1130 int stoprev, iterrev, baserev = -1;
1131 1131 int stopped;
1132 1132 PyObject *chain = NULL, *result = NULL;
1133 1133 const Py_ssize_t length = index_length(self);
1134 1134
1135 1135 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
1136 1136 return NULL;
1137 1137 }
1138 1138
1139 1139 if (PyInt_Check(stoparg)) {
1140 1140 stoprev = (int)PyInt_AsLong(stoparg);
1141 1141 if (stoprev == -1 && PyErr_Occurred()) {
1142 1142 return NULL;
1143 1143 }
1144 1144 } else if (stoparg == Py_None) {
1145 1145 stoprev = -2;
1146 1146 } else {
1147 1147 PyErr_SetString(PyExc_ValueError,
1148 1148 "stoprev must be integer or None");
1149 1149 return NULL;
1150 1150 }
1151 1151
1152 1152 if (rev < 0 || rev >= length) {
1153 1153 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1154 1154 return NULL;
1155 1155 }
1156 1156
1157 1157 chain = PyList_New(0);
1158 1158 if (chain == NULL) {
1159 1159 return NULL;
1160 1160 }
1161 1161
1162 1162 baserev = index_baserev(self, rev);
1163 1163
1164 1164 /* This should never happen. */
1165 1165 if (baserev <= -2) {
1166 1166 /* Error should be set by index_deref() */
1167 1167 assert(PyErr_Occurred());
1168 1168 goto bail;
1169 1169 }
1170 1170
1171 1171 iterrev = rev;
1172 1172
1173 1173 while (iterrev != baserev && iterrev != stoprev) {
1174 1174 PyObject *value = PyInt_FromLong(iterrev);
1175 1175 if (value == NULL) {
1176 1176 goto bail;
1177 1177 }
1178 1178 if (PyList_Append(chain, value)) {
1179 1179 Py_DECREF(value);
1180 1180 goto bail;
1181 1181 }
1182 1182 Py_DECREF(value);
1183 1183
1184 1184 if (generaldelta) {
1185 1185 iterrev = baserev;
1186 1186 } else {
1187 1187 iterrev--;
1188 1188 }
1189 1189
1190 1190 if (iterrev < 0) {
1191 1191 break;
1192 1192 }
1193 1193
1194 1194 if (iterrev >= length) {
1195 1195 PyErr_SetString(PyExc_IndexError,
1196 1196 "revision outside index");
1197 1197 return NULL;
1198 1198 }
1199 1199
1200 1200 baserev = index_baserev(self, iterrev);
1201 1201
1202 1202 /* This should never happen. */
1203 1203 if (baserev <= -2) {
1204 1204 /* Error should be set by index_deref() */
1205 1205 assert(PyErr_Occurred());
1206 1206 goto bail;
1207 1207 }
1208 1208 }
1209 1209
1210 1210 if (iterrev == stoprev) {
1211 1211 stopped = 1;
1212 1212 } else {
1213 1213 PyObject *value = PyInt_FromLong(iterrev);
1214 1214 if (value == NULL) {
1215 1215 goto bail;
1216 1216 }
1217 1217 if (PyList_Append(chain, value)) {
1218 1218 Py_DECREF(value);
1219 1219 goto bail;
1220 1220 }
1221 1221 Py_DECREF(value);
1222 1222
1223 1223 stopped = 0;
1224 1224 }
1225 1225
1226 1226 if (PyList_Reverse(chain)) {
1227 1227 goto bail;
1228 1228 }
1229 1229
1230 1230 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
1231 1231 Py_DECREF(chain);
1232 1232 return result;
1233 1233
1234 1234 bail:
1235 1235 Py_DECREF(chain);
1236 1236 return NULL;
1237 1237 }
1238 1238
/*
 * Return the number of on-disk bytes spanned by revisions
 * [start_rev, end_rev] inclusive: the distance from the start of
 * start_rev's data to the end of end_rev's data.
 * Returns -1 on error (with a Python exception set).
 */
static inline int64_t
index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
{
	int64_t start_offset;
	int64_t end_offset;
	int end_size;
	start_offset = index_get_start(self, start_rev);
	if (start_offset < 0) {
		return -1;
	}
	end_offset = index_get_start(self, end_rev);
	if (end_offset < 0) {
		return -1;
	}
	end_size = index_get_length(self, end_rev);
	if (end_size < 0) {
		return -1;
	}
	if (end_offset < start_offset) {
		/* offsets must grow with revision numbers; anything else
		 * means the index data is corrupted */
		PyErr_Format(PyExc_ValueError,
		             "corrupted revlog index: inconsistent offset "
		             "between revisions (%zd) and (%zd)",
		             start_rev, end_rev);
		return -1;
	}
	return (end_offset - start_offset) + (int64_t)end_size;
}
1266 1266
/* returns endidx so that revs[startidx:endidx] has no empty trailing revs,
 * or -1 on error (with a Python exception set) */
static Py_ssize_t trim_endidx(indexObject *self, const Py_ssize_t *revs,
                              Py_ssize_t startidx, Py_ssize_t endidx)
{
	int length;
	/* walk backwards, dropping zero-length revisions from the tail */
	while (endidx > 1 && endidx > startidx) {
		length = index_get_length(self, revs[endidx - 1]);
		if (length < 0) {
			return -1;
		}
		if (length != 0) {
			break;
		}
		endidx -= 1;
	}
	return endidx;
}
1284 1284
/* A gap between two revisions' on-disk data, recorded while slicing a
 * delta chain: `size` in bytes, located before entry `idx` of the
 * input revision list. */
struct Gap {
	int64_t size;
	Py_ssize_t idx;
};
1289 1289
1290 1290 static int gap_compare(const void *left, const void *right)
1291 1291 {
1292 1292 const struct Gap *l_left = ((const struct Gap *)left);
1293 1293 const struct Gap *l_right = ((const struct Gap *)right);
1294 1294 if (l_left->size < l_right->size) {
1295 1295 return -1;
1296 1296 } else if (l_left->size > l_right->size) {
1297 1297 return 1;
1298 1298 }
1299 1299 return 0;
1300 1300 }
1301 1301 static int Py_ssize_t_compare(const void *left, const void *right)
1302 1302 {
1303 1303 const Py_ssize_t l_left = *(const Py_ssize_t *)left;
1304 1304 const Py_ssize_t l_right = *(const Py_ssize_t *)right;
1305 1305 if (l_left < l_right) {
1306 1306 return -1;
1307 1307 } else if (l_left > l_right) {
1308 1308 return 1;
1309 1309 }
1310 1310 return 0;
1311 1311 }
1312 1312
/*
 * slicechunktodensity(revs, targetdensity, mingapsize) -> sequence of lists
 *
 * Slice the delta chain `revs` (a list of revision numbers) into
 * contiguous chunks so that reading each chunk achieves at least
 * `targetdensity` of useful payload, by skipping over gaps larger
 * than `mingapsize` bytes.  Returns the input list unchanged (wrapped
 * in a 1-tuple) when no slicing is needed, otherwise a list of slices.
 */
static PyObject *index_slicechunktodensity(indexObject *self, PyObject *args)
{
	/* method arguments */
	PyObject *list_revs = NULL; /* revisions in the chain */
	double targetdensity = 0;   /* min density to achieve */
	Py_ssize_t mingapsize = 0;  /* threshold to ignore gaps */

	/* other core variables */
	Py_ssize_t idxlen = index_length(self);
	Py_ssize_t i;            /* used for various iteration */
	PyObject *result = NULL; /* the final return of the function */

	/* generic information about the delta chain being slice */
	Py_ssize_t num_revs = 0;    /* size of the full delta chain */
	Py_ssize_t *revs = NULL;    /* native array of revision in the chain */
	int64_t chainpayload = 0;   /* sum of all delta in the chain */
	int64_t deltachainspan = 0; /* distance from first byte to last byte */

	/* variable used for slicing the delta chain */
	int64_t readdata = 0; /* amount of data currently planned to be read */
	double density = 0;   /* ration of payload data compared to read ones */
	int64_t previous_end;
	struct Gap *gaps = NULL; /* array of notable gap in the chain */
	Py_ssize_t num_gaps =
	    0; /* total number of notable gap recorded so far */
	Py_ssize_t *selected_indices = NULL; /* indices of gap skipped over */
	Py_ssize_t num_selected = 0;         /* number of gaps skipped */
	PyObject *chunk = NULL;              /* individual slice */
	PyObject *allchunks = NULL;          /* all slices */
	Py_ssize_t previdx;

	/* parsing argument */
	if (!PyArg_ParseTuple(args, "O!dn", &PyList_Type, &list_revs,
	                      &targetdensity, &mingapsize)) {
		goto bail;
	}

	/* If the delta chain contains a single element, we do not need slicing
	 */
	num_revs = PyList_GET_SIZE(list_revs);
	if (num_revs <= 1) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* Turn the python list into a native integer array (for efficiency) */
	revs = (Py_ssize_t *)calloc(num_revs, sizeof(Py_ssize_t));
	if (revs == NULL) {
		PyErr_NoMemory();
		goto bail;
	}
	for (i = 0; i < num_revs; i++) {
		Py_ssize_t revnum = PyInt_AsLong(PyList_GET_ITEM(list_revs, i));
		if (revnum == -1 && PyErr_Occurred()) {
			goto bail;
		}
		if (revnum < nullrev || revnum >= idxlen) {
			PyErr_Format(PyExc_IndexError,
			             "index out of range: %zd", revnum);
			goto bail;
		}
		revs[i] = revnum;
	}

	/* Compute and check various property of the unsliced delta chain */
	deltachainspan = index_segment_span(self, revs[0], revs[num_revs - 1]);
	if (deltachainspan < 0) {
		goto bail;
	}

	/* a chain smaller than the gap threshold is never worth slicing */
	if (deltachainspan <= mingapsize) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	chainpayload = 0;
	for (i = 0; i < num_revs; i++) {
		int tmp = index_get_length(self, revs[i]);
		if (tmp < 0) {
			goto bail;
		}
		chainpayload += tmp;
	}

	readdata = deltachainspan;
	density = 1.0;

	if (0 < deltachainspan) {
		density = (double)chainpayload / (double)deltachainspan;
	}

	/* chain already dense enough: return it whole */
	if (density >= targetdensity) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* if chain is too sparse, look for relevant gaps */
	gaps = (struct Gap *)calloc(num_revs, sizeof(struct Gap));
	if (gaps == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	previous_end = -1;
	for (i = 0; i < num_revs; i++) {
		int64_t revstart;
		int revsize;
		revstart = index_get_start(self, revs[i]);
		if (revstart < 0) {
			goto bail;
		};
		revsize = index_get_length(self, revs[i]);
		if (revsize < 0) {
			goto bail;
		};
		if (revsize == 0) {
			continue;
		}
		if (previous_end >= 0) {
			int64_t gapsize = revstart - previous_end;
			/* only gaps above the threshold are worth skipping */
			if (gapsize > mingapsize) {
				gaps[num_gaps].size = gapsize;
				gaps[num_gaps].idx = i;
				num_gaps += 1;
			}
		}
		previous_end = revstart + revsize;
	}
	if (num_gaps == 0) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	qsort(gaps, num_gaps, sizeof(struct Gap), &gap_compare);

	/* Slice the largest gap first, they improve the density the most */
	selected_indices =
	    (Py_ssize_t *)malloc((num_gaps + 1) * sizeof(Py_ssize_t));
	if (selected_indices == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	for (i = num_gaps - 1; i >= 0; i--) {
		selected_indices[num_selected] = gaps[i].idx;
		readdata -= gaps[i].size;
		num_selected += 1;
		if (readdata <= 0) {
			density = 1.0;
		} else {
			density = (double)chainpayload / (double)readdata;
		}
		if (density >= targetdensity) {
			break;
		}
	}
	/* restore cut points to rev-list order before slicing */
	qsort(selected_indices, num_selected, sizeof(Py_ssize_t),
	      &Py_ssize_t_compare);

	/* create the resulting slice */
	allchunks = PyList_New(0);
	if (allchunks == NULL) {
		goto bail;
	}
	previdx = 0;
	/* sentinel: the final slice ends at the end of the rev list */
	selected_indices[num_selected] = num_revs;
	for (i = 0; i <= num_selected; i++) {
		Py_ssize_t idx = selected_indices[i];
		Py_ssize_t endidx = trim_endidx(self, revs, previdx, idx);
		if (endidx < 0) {
			goto bail;
		}
		if (previdx < endidx) {
			chunk = PyList_GetSlice(list_revs, previdx, endidx);
			if (chunk == NULL) {
				goto bail;
			}
			if (PyList_Append(allchunks, chunk) == -1) {
				goto bail;
			}
			Py_DECREF(chunk);
			chunk = NULL;
		}
		previdx = idx;
	}
	result = allchunks;
	goto done;

bail:
	Py_XDECREF(allchunks);
	Py_XDECREF(chunk);
done:
	free(revs);
	free(gaps);
	free(selected_indices);
	return result;
}
1508 1508
/* Return the nybble (4-bit digit) of binary node id `node` at position
 * `level`; even levels are the high nybble of a byte, odd levels the
 * low one. */
static inline int nt_level(const char *node, Py_ssize_t level)
{
	int v = node[level >> 1];
	if (!(level & 1))
		v >>= 4;
	return v & 0xf;
}
1516 1516
/*
 * Look up `node` in the radix tree, reading it as hex digits when
 * `hex` is non-zero and as binary bytes otherwise, matching at most
 * `nodelen` characters of it.
 *
 * Return values:
 *
 * -4: match is ambiguous (multiple candidates)
 * -2: not found
 * rest: valid rev
 */
static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
                   int hex)
{
	/* accessor matching the input encoding */
	int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
	int level, maxlevel, off;

	/* fast-path the null id (first byte check avoids most memcmps) */
	if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
		return -1;

	/* a full node is 20 bytes == 40 nybbles */
	if (hex)
		maxlevel = nodelen > 40 ? 40 : (int)nodelen;
	else
		maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);

	for (level = off = 0; level < maxlevel; level++) {
		int k = getnybble(node, level);
		nodetreenode *n = &self->nodes[off];
		int v = n->children[k];

		if (v < 0) {
			/* leaf: v encodes -(rev + 2); verify the rest of
			 * the requested prefix against the stored node */
			const char *n;
			Py_ssize_t i;

			v = -(v + 2);
			n = index_node(self->index, v);
			if (n == NULL)
				return -2;
			for (i = level; i < maxlevel; i++)
				if (getnybble(node, i) != nt_level(n, i))
					return -2;
			return v;
		}
		if (v == 0)
			return -2;
		/* inner node: descend */
		off = v;
	}
	/* multiple matches against an ambiguous prefix */
	return -4;
}
1563 1563
/*
 * Allocate a fresh nodetree node, doubling the backing array when it
 * is full.  Returns the index of the new node, or -1 on error (with a
 * Python exception set).
 */
static int nt_new(nodetree *self)
{
	if (self->length == self->capacity) {
		unsigned newcapacity;
		nodetreenode *newnodes;
		newcapacity = self->capacity * 2;
		if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
			PyErr_SetString(PyExc_MemoryError,
			                "overflow in nt_new");
			return -1;
		}
		newnodes =
		    realloc(self->nodes, newcapacity * sizeof(nodetreenode));
		if (newnodes == NULL) {
			/* self->nodes is still valid; nothing was changed */
			PyErr_SetString(PyExc_MemoryError, "out of memory");
			return -1;
		}
		self->capacity = newcapacity;
		self->nodes = newnodes;
		/* zero only the newly grown tail of the array */
		memset(&self->nodes[self->length], 0,
		       sizeof(nodetreenode) * (self->capacity - self->length));
	}
	return self->length++;
}
1588 1588
/*
 * Insert a 20-byte binary `node` into the tree, mapping it to `rev`.
 * Splits an existing leaf into an inner node when two nodes share a
 * prefix.  Returns 0 on success, -1 on error (exception set).
 */
static int nt_insert(nodetree *self, const char *node, int rev)
{
	int level = 0;
	int off = 0;

	while (level < 40) {
		int k = nt_level(node, level);
		nodetreenode *n;
		int v;

		n = &self->nodes[off];
		v = n->children[k];

		if (v == 0) {
			/* empty slot: store the leaf, encoded as -(rev+2) */
			n->children[k] = -rev - 2;
			return 0;
		}
		if (v < 0) {
			/* slot holds another leaf: split it */
			const char *oldnode =
			    index_node_existing(self->index, -(v + 2));
			int noff;

			if (oldnode == NULL)
				return -1;
			if (!memcmp(oldnode, node, 20)) {
				/* same node: just overwrite the rev */
				n->children[k] = -rev - 2;
				return 0;
			}
			noff = nt_new(self);
			if (noff == -1)
				return -1;
			/* self->nodes may have been changed by realloc */
			self->nodes[off].children[k] = noff;
			off = noff;
			n = &self->nodes[off];
			/* re-file the displaced leaf one level deeper */
			n->children[nt_level(oldnode, ++level)] = v;
			if (level > self->depth)
				self->depth = level;
			self->splits += 1;
		} else {
			/* inner node: descend */
			level += 1;
			off = v;
		}
	}

	/* unreachable for valid 20-byte nodes: 40 levels always resolve */
	return -1;
}
1636 1636
1637 1637 static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
1638 1638 {
1639 1639 Py_ssize_t rev;
1640 1640 const char *node;
1641 1641 Py_ssize_t length;
1642 1642 if (!PyArg_ParseTuple(args, "n", &rev))
1643 1643 return NULL;
1644 1644 length = index_length(self->nt.index);
1645 1645 if (rev < 0 || rev >= length) {
1646 1646 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1647 1647 return NULL;
1648 1648 }
1649 1649 node = index_node_existing(self->nt.index, rev);
1650 1650 if (nt_insert(&self->nt, node, (int)rev) == -1)
1651 1651 return NULL;
1652 1652 Py_RETURN_NONE;
1653 1653 }
1654 1654
/* Remove `node` from the tree.  Returns 0 on success, -1 on error. */
static int nt_delete_node(nodetree *self, const char *node)
{
	/* rev==-2 happens to get encoded as 0, which is interpreted as not set
	 */
	return nt_insert(self, node, -2);
}
1661 1661
/*
 * Initialize a nodetree over `index`, pre-sized for `capacity`
 * revisions.  Returns 0 on success, -1 on error (exception set).
 * Does not take a reference on `index`; the caller manages that.
 */
static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
{
	/* Initialize before overflow-checking to avoid nt_dealloc() crash. */
	self->nodes = NULL;

	self->index = index;
	/* The input capacity is in terms of revisions, while the field is in
	 * terms of nodetree nodes. */
	self->capacity = (capacity < 4 ? 4 : capacity / 2);
	self->depth = 0;
	self->splits = 0;
	if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
		PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
		return -1;
	}
	self->nodes = calloc(self->capacity, sizeof(nodetreenode));
	if (self->nodes == NULL) {
		PyErr_NoMemory();
		return -1;
	}
	/* node 0 is the root */
	self->length = 1;
	return 0;
}
1685 1685
/* tp_init for the standalone nodetree type: nodetree(index, capacity).
 * Takes a reference on the index, released in ntobj_dealloc(). */
static int ntobj_init(nodetreeObject *self, PyObject *args)
{
	PyObject *index;
	unsigned capacity;
	if (!PyArg_ParseTuple(args, "O!I", &HgRevlogIndex_Type, &index,
	                      &capacity))
		return -1;
	Py_INCREF(index);
	return nt_init(&self->nt, (indexObject *)index, capacity);
}
1696 1696
/* Look up a hex prefix of `nodelen` digits; same return values as
 * nt_find(). */
static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
{
	return nt_find(self, node, nodelen, 1);
}
1701 1701
/*
 * Find the length of the shortest unique prefix of node.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: length of shortest prefix
 */
static int nt_shortest(nodetree *self, const char *node)
{
	int level, off;

	for (level = off = 0; level < 40; level++) {
		int k, v;
		nodetreenode *n = &self->nodes[off];
		k = nt_level(node, level);
		v = n->children[k];
		if (v < 0) {
			/* leaf entry: decode the rev and make sure it is
			 * really the node we were asked about */
			const char *n;
			v = -(v + 2);
			n = index_node_existing(self->index, v);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, 20) != 0)
				/*
				 * Found a unique prefix, but it wasn't for the
				 * requested node (i.e the requested node does
				 * not exist).
				 */
				return -2;
			return level + 1;
		}
		if (v == 0)
			return -2;
		/* inner node: descend one nybble deeper */
		off = v;
	}
	/*
	 * The node was still not unique after 40 hex digits, so this won't
	 * happen. Also, if we get here, then there's a programming error in
	 * this file that made us insert a node longer than 40 hex digits.
	 */
	PyErr_SetString(PyExc_Exception, "broken node tree");
	return -3;
}
1747 1747
1748 1748 static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
1749 1749 {
1750 1750 PyObject *val;
1751 1751 char *node;
1752 1752 int length;
1753 1753
1754 1754 if (!PyArg_ParseTuple(args, "O", &val))
1755 1755 return NULL;
1756 1756 if (node_check(val, &node) == -1)
1757 1757 return NULL;
1758 1758
1759 1759 length = nt_shortest(&self->nt, node);
1760 1760 if (length == -3)
1761 1761 return NULL;
1762 1762 if (length == -2) {
1763 1763 raise_revlog_error();
1764 1764 return NULL;
1765 1765 }
1766 1766 return PyInt_FromLong(length);
1767 1767 }
1768 1768
/* Release the tree's node array; safe to call repeatedly. */
static void nt_dealloc(nodetree *self)
{
	free(self->nodes);
	self->nodes = NULL;
}
1774 1774
/* tp_dealloc for the nodetree type: drop the index reference taken in
 * ntobj_init(), free the tree storage, then the object itself. */
static void ntobj_dealloc(nodetreeObject *self)
{
	Py_XDECREF(self->nt.index);
	nt_dealloc(&self->nt);
	PyObject_Del(self);
}
1781 1781
/* Method table for the standalone nodetree type. */
static PyMethodDef ntobj_methods[] = {
    {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
     "insert an index entry"},
    {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
     "find length of shortest hex nodeid of a binary ID"},
    {NULL} /* Sentinel */
};
1789 1789
/* Python type object exposing the nodetree as "parsers.nodetree". */
static PyTypeObject nodetreeType = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.nodetree",            /* tp_name */
    sizeof(nodetreeObject),        /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)ntobj_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    0,                             /* tp_as_sequence */
    0,                             /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "nodetree",                    /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    ntobj_methods,                 /* tp_methods */
    0,                             /* tp_members */
    0,                             /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)ntobj_init,          /* tp_init */
    0,                             /* tp_alloc */
};
1829 1829
/*
 * Lazily create and seed the index's embedded node tree.  The tree
 * initially contains only nullid; real revisions are inserted on
 * demand (see index_find_node / index_populate_nt).  Returns 0 on
 * success, -1 on error (exception set).
 */
static int index_init_nt(indexObject *self)
{
	if (!self->ntinitialized) {
		if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
			nt_dealloc(&self->nt);
			return -1;
		}
		if (nt_insert(&self->nt, nullid, -1) == -1) {
			nt_dealloc(&self->nt);
			return -1;
		}
		self->ntinitialized = 1;
		/* revisions >= ntrev are not yet cached in the tree */
		self->ntrev = (int)index_length(self);
		self->ntlookups = 1;
		self->ntmisses = 0;
	}
	return 0;
}
1848 1848
/*
 * Resolve a 20-byte (or shorter) binary node to its revision number,
 * populating the radix-tree cache as a side effect.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: valid rev
 */
static int index_find_node(indexObject *self, const char *node,
                           Py_ssize_t nodelen)
{
	int rev;

	if (index_init_nt(self) == -1)
		return -3;

	self->ntlookups++;
	rev = nt_find(&self->nt, node, nodelen, 0);
	if (rev >= -1)
		return rev;

	/*
	 * For the first handful of lookups, we scan the entire index,
	 * and cache only the matching nodes. This optimizes for cases
	 * like "hg tip", where only a few nodes are accessed.
	 *
	 * After that, we cache every node we visit, using a single
	 * scan amortized over multiple lookups. This gives the best
	 * bulk performance, e.g. for "hg log".
	 */
	if (self->ntmisses++ < 4) {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				/* cache only the hit */
				if (nt_insert(&self->nt, n, rev) == -1)
					return -3;
				break;
			}
		}
	} else {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			/* cache every node visited during the scan */
			if (nt_insert(&self->nt, n, rev) == -1) {
				self->ntrev = rev + 1;
				return -3;
			}
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				break;
			}
		}
		/* everything >= ntrev is now cached */
		self->ntrev = rev;
	}

	if (rev >= 0)
		return rev;
	return -2;
}
1909 1909
/*
 * __getitem__: an integer argument is a revision number, delegated to
 * index_get(); anything else must be a 20-byte node, resolved to its
 * revision number (raising RevlogError when absent).
 */
static PyObject *index_getitem(indexObject *self, PyObject *value)
{
	char *node;
	int rev;

	if (PyInt_Check(value)) {
		long idx;
		if (!pylong_to_long(value, &idx)) {
			return NULL;
		}
		return index_get(self, idx);
	}

	if (node_check(value, &node) == -1)
		return NULL;
	rev = index_find_node(self, node, 20);
	if (rev >= -1)
		return PyInt_FromLong(rev);
	if (rev == -2)
		raise_revlog_error();
	return NULL;
}
1932 1932
1933 1933 /*
1934 1934 * Fully populate the radix tree.
1935 1935 */
1936 1936 static int index_populate_nt(indexObject *self)
1937 1937 {
1938 1938 int rev;
1939 1939 if (self->ntrev > 0) {
1940 1940 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1941 1941 const char *n = index_node_existing(self, rev);
1942 1942 if (n == NULL)
1943 1943 return -1;
1944 1944 if (nt_insert(&self->nt, n, rev) == -1)
1945 1945 return -1;
1946 1946 }
1947 1947 self->ntrev = -1;
1948 1948 }
1949 1949 return 0;
1950 1950 }
1951 1951
/*
 * partialmatch(hexprefix) -> 20-byte node, or None when nothing
 * matches; raises RevlogError when the prefix is ambiguous.
 */
static PyObject *index_partialmatch(indexObject *self, PyObject *args)
{
	const char *fullnode;
	Py_ssize_t nodelen;
	char *node;
	int rev, i;

	if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
		return NULL;

	if (nodelen < 1) {
		PyErr_SetString(PyExc_ValueError, "key too short");
		return NULL;
	}

	if (nodelen > 40) {
		PyErr_SetString(PyExc_ValueError, "key too long");
		return NULL;
	}

	for (i = 0; i < nodelen; i++)
		hexdigit(node, i);
	if (PyErr_Occurred()) {
		/* input contains non-hex characters */
		PyErr_Clear();
		Py_RETURN_NONE;
	}

	/* prefix lookups are only authoritative on a full tree */
	if (index_init_nt(self) == -1)
		return NULL;
	if (index_populate_nt(self) == -1)
		return NULL;
	rev = nt_partialmatch(&self->nt, node, nodelen);

	switch (rev) {
	case -4:
		/* ambiguous prefix */
		raise_revlog_error();
		return NULL;
	case -2:
		Py_RETURN_NONE;
	case -1:
		return PyBytes_FromStringAndSize(nullid, 20);
	}

	fullnode = index_node_existing(self, rev);
	if (fullnode == NULL) {
		return NULL;
	}
	return PyBytes_FromStringAndSize(fullnode, 20);
}
2002 2002
/*
 * shortest(node) -> length of the shortest unique hex prefix of the
 * given 20-byte node; raises RevlogError when the node is absent.
 */
static PyObject *index_shortest(indexObject *self, PyObject *args)
{
	PyObject *val;
	char *node;
	int length;

	if (!PyArg_ParseTuple(args, "O", &val))
		return NULL;
	if (node_check(val, &node) == -1)
		return NULL;

	self->ntlookups++;
	/* the answer is only reliable once the tree is fully built */
	if (index_init_nt(self) == -1)
		return NULL;
	if (index_populate_nt(self) == -1)
		return NULL;
	length = nt_shortest(&self->nt, node);
	if (length == -3)
		return NULL;
	if (length == -2) {
		raise_revlog_error();
		return NULL;
	}
	return PyInt_FromLong(length);
}
2028 2028
2029 2029 static PyObject *index_m_get(indexObject *self, PyObject *args)
2030 2030 {
2031 2031 PyObject *val;
2032 2032 char *node;
2033 2033 int rev;
2034 2034
2035 2035 if (!PyArg_ParseTuple(args, "O", &val))
2036 2036 return NULL;
2037 2037 if (node_check(val, &node) == -1)
2038 2038 return NULL;
2039 2039 rev = index_find_node(self, node, 20);
2040 2040 if (rev == -3)
2041 2041 return NULL;
2042 2042 if (rev == -2)
2043 2043 Py_RETURN_NONE;
2044 2044 return PyInt_FromLong(rev);
2045 2045 }
2046 2046
/*
 * __contains__: integers are revision numbers, valid when within
 * [-1, len); anything else must be a 20-byte node, contained when it
 * resolves to a revision.  Returns 1/0, or -1 on error (exception
 * set).
 */
static int index_contains(indexObject *self, PyObject *value)
{
	char *node;

	if (PyInt_Check(value)) {
		long rev;
		if (!pylong_to_long(value, &rev)) {
			return -1;
		}
		return rev >= -1 && rev < index_length(self);
	}

	if (node_check(value, &node) == -1)
		return -1;

	switch (index_find_node(self, node, 20)) {
	case -3:
		return -1;
	case -2:
		return 0;
	default:
		return 1;
	}
}
2071 2071
2072 2072 static PyObject *index_m_has_node(indexObject *self, PyObject *args)
2073 2073 {
2074 2074 int ret = index_contains(self, args);
2075 2075 if (ret < 0)
2076 2076 return NULL;
2077 2077 return PyBool_FromLong((long)ret);
2078 2078 }
2079 2079
/* index.rev(node) -> rev; raises RevlogError when the node is absent. */
static PyObject *index_m_rev(indexObject *self, PyObject *val)
{
	char *node;
	int rev;

	if (node_check(val, &node) == -1)
		return NULL;
	rev = index_find_node(self, node, 20);
	if (rev >= -1)
		return PyInt_FromLong(rev);
	if (rev == -2)
		raise_revlog_error();
	return NULL;
}
2094 2094
2095 2095 typedef uint64_t bitmask;
2096 2096
2097 2097 /*
2098 2098 * Given a disjoint set of revs, return all candidates for the
2099 2099 * greatest common ancestor. In revset notation, this is the set
2100 2100 * "heads(::a and ::b and ...)"
2101 2101 */
2102 2102 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
2103 2103 int revcount)
2104 2104 {
2105 2105 const bitmask allseen = (1ull << revcount) - 1;
2106 2106 const bitmask poison = 1ull << revcount;
2107 2107 PyObject *gca = PyList_New(0);
2108 2108 int i, v, interesting;
2109 2109 int maxrev = -1;
2110 2110 bitmask sp;
2111 2111 bitmask *seen;
2112 2112
2113 2113 if (gca == NULL)
2114 2114 return PyErr_NoMemory();
2115 2115
2116 2116 for (i = 0; i < revcount; i++) {
2117 2117 if (revs[i] > maxrev)
2118 2118 maxrev = revs[i];
2119 2119 }
2120 2120
2121 2121 seen = calloc(sizeof(*seen), maxrev + 1);
2122 2122 if (seen == NULL) {
2123 2123 Py_DECREF(gca);
2124 2124 return PyErr_NoMemory();
2125 2125 }
2126 2126
2127 2127 for (i = 0; i < revcount; i++)
2128 2128 seen[revs[i]] = 1ull << i;
2129 2129
2130 2130 interesting = revcount;
2131 2131
2132 2132 for (v = maxrev; v >= 0 && interesting; v--) {
2133 2133 bitmask sv = seen[v];
2134 2134 int parents[2];
2135 2135
2136 2136 if (!sv)
2137 2137 continue;
2138 2138
2139 2139 if (sv < poison) {
2140 2140 interesting -= 1;
2141 2141 if (sv == allseen) {
2142 2142 PyObject *obj = PyInt_FromLong(v);
2143 2143 if (obj == NULL)
2144 2144 goto bail;
2145 2145 if (PyList_Append(gca, obj) == -1) {
2146 2146 Py_DECREF(obj);
2147 2147 goto bail;
2148 2148 }
2149 2149 sv |= poison;
2150 2150 for (i = 0; i < revcount; i++) {
2151 2151 if (revs[i] == v)
2152 2152 goto done;
2153 2153 }
2154 2154 }
2155 2155 }
2156 2156 if (index_get_parents(self, v, parents, maxrev) < 0)
2157 2157 goto bail;
2158 2158
2159 2159 for (i = 0; i < 2; i++) {
2160 2160 int p = parents[i];
2161 2161 if (p == -1)
2162 2162 continue;
2163 2163 sp = seen[p];
2164 2164 if (sv < poison) {
2165 2165 if (sp == 0) {
2166 2166 seen[p] = sv;
2167 2167 interesting++;
2168 2168 } else if (sp != sv)
2169 2169 seen[p] |= sv;
2170 2170 } else {
2171 2171 if (sp && sp < poison)
2172 2172 interesting--;
2173 2173 seen[p] = sv;
2174 2174 }
2175 2175 }
2176 2176 }
2177 2177
2178 2178 done:
2179 2179 free(seen);
2180 2180 return gca;
2181 2181 bail:
2182 2182 free(seen);
2183 2183 Py_XDECREF(gca);
2184 2184 return NULL;
2185 2185 }
2186 2186
2187 2187 /*
2188 2188 * Given a disjoint set of revs, return the subset with the longest
2189 2189 * path to the root.
2190 2190 */
2191 2191 static PyObject *find_deepest(indexObject *self, PyObject *revs)
2192 2192 {
2193 2193 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
2194 2194 static const Py_ssize_t capacity = 24;
2195 2195 int *depth, *interesting = NULL;
2196 2196 int i, j, v, ninteresting;
2197 2197 PyObject *dict = NULL, *keys = NULL;
2198 2198 long *seen = NULL;
2199 2199 int maxrev = -1;
2200 2200 long final;
2201 2201
2202 2202 if (revcount > capacity) {
2203 2203 PyErr_Format(PyExc_OverflowError,
2204 2204 "bitset size (%ld) > capacity (%ld)",
2205 2205 (long)revcount, (long)capacity);
2206 2206 return NULL;
2207 2207 }
2208 2208
2209 2209 for (i = 0; i < revcount; i++) {
2210 2210 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2211 2211 if (n > maxrev)
2212 2212 maxrev = n;
2213 2213 }
2214 2214
2215 2215 depth = calloc(sizeof(*depth), maxrev + 1);
2216 2216 if (depth == NULL)
2217 2217 return PyErr_NoMemory();
2218 2218
2219 2219 seen = calloc(sizeof(*seen), maxrev + 1);
2220 2220 if (seen == NULL) {
2221 2221 PyErr_NoMemory();
2222 2222 goto bail;
2223 2223 }
2224 2224
2225 2225 interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
2226 2226 if (interesting == NULL) {
2227 2227 PyErr_NoMemory();
2228 2228 goto bail;
2229 2229 }
2230 2230
2231 2231 if (PyList_Sort(revs) == -1)
2232 2232 goto bail;
2233 2233
2234 2234 for (i = 0; i < revcount; i++) {
2235 2235 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2236 2236 long b = 1l << i;
2237 2237 depth[n] = 1;
2238 2238 seen[n] = b;
2239 2239 interesting[b] = 1;
2240 2240 }
2241 2241
2242 2242 /* invariant: ninteresting is the number of non-zero entries in
2243 2243 * interesting. */
2244 2244 ninteresting = (int)revcount;
2245 2245
2246 2246 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
2247 2247 int dv = depth[v];
2248 2248 int parents[2];
2249 2249 long sv;
2250 2250
2251 2251 if (dv == 0)
2252 2252 continue;
2253 2253
2254 2254 sv = seen[v];
2255 2255 if (index_get_parents(self, v, parents, maxrev) < 0)
2256 2256 goto bail;
2257 2257
2258 2258 for (i = 0; i < 2; i++) {
2259 2259 int p = parents[i];
2260 2260 long sp;
2261 2261 int dp;
2262 2262
2263 2263 if (p == -1)
2264 2264 continue;
2265 2265
2266 2266 dp = depth[p];
2267 2267 sp = seen[p];
2268 2268 if (dp <= dv) {
2269 2269 depth[p] = dv + 1;
2270 2270 if (sp != sv) {
2271 2271 interesting[sv] += 1;
2272 2272 seen[p] = sv;
2273 2273 if (sp) {
2274 2274 interesting[sp] -= 1;
2275 2275 if (interesting[sp] == 0)
2276 2276 ninteresting -= 1;
2277 2277 }
2278 2278 }
2279 2279 } else if (dv == dp - 1) {
2280 2280 long nsp = sp | sv;
2281 2281 if (nsp == sp)
2282 2282 continue;
2283 2283 seen[p] = nsp;
2284 2284 interesting[sp] -= 1;
2285 2285 if (interesting[sp] == 0)
2286 2286 ninteresting -= 1;
2287 2287 if (interesting[nsp] == 0)
2288 2288 ninteresting += 1;
2289 2289 interesting[nsp] += 1;
2290 2290 }
2291 2291 }
2292 2292 interesting[sv] -= 1;
2293 2293 if (interesting[sv] == 0)
2294 2294 ninteresting -= 1;
2295 2295 }
2296 2296
2297 2297 final = 0;
2298 2298 j = ninteresting;
2299 2299 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
2300 2300 if (interesting[i] == 0)
2301 2301 continue;
2302 2302 final |= i;
2303 2303 j -= 1;
2304 2304 }
2305 2305 if (final == 0) {
2306 2306 keys = PyList_New(0);
2307 2307 goto bail;
2308 2308 }
2309 2309
2310 2310 dict = PyDict_New();
2311 2311 if (dict == NULL)
2312 2312 goto bail;
2313 2313
2314 2314 for (i = 0; i < revcount; i++) {
2315 2315 PyObject *key;
2316 2316
2317 2317 if ((final & (1 << i)) == 0)
2318 2318 continue;
2319 2319
2320 2320 key = PyList_GET_ITEM(revs, i);
2321 2321 Py_INCREF(key);
2322 2322 Py_INCREF(Py_None);
2323 2323 if (PyDict_SetItem(dict, key, Py_None) == -1) {
2324 2324 Py_DECREF(key);
2325 2325 Py_DECREF(Py_None);
2326 2326 goto bail;
2327 2327 }
2328 2328 }
2329 2329
2330 2330 keys = PyDict_Keys(dict);
2331 2331
2332 2332 bail:
2333 2333 free(depth);
2334 2334 free(seen);
2335 2335 free(interesting);
2336 2336 Py_XDECREF(dict);
2337 2337
2338 2338 return keys;
2339 2339 }
2340 2340
2341 2341 /*
2342 2342 * Given a (possibly overlapping) set of revs, return all the
2343 2343 * common ancestors heads: heads(::args[0] and ::a[1] and ...)
2344 2344 */
2345 2345 static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
2346 2346 {
2347 2347 PyObject *ret = NULL;
2348 2348 Py_ssize_t argcount, i, len;
2349 2349 bitmask repeat = 0;
2350 2350 int revcount = 0;
2351 2351 int *revs;
2352 2352
2353 2353 argcount = PySequence_Length(args);
2354 2354 revs = PyMem_Malloc(argcount * sizeof(*revs));
2355 2355 if (argcount > 0 && revs == NULL)
2356 2356 return PyErr_NoMemory();
2357 2357 len = index_length(self);
2358 2358
2359 2359 for (i = 0; i < argcount; i++) {
2360 2360 static const int capacity = 24;
2361 2361 PyObject *obj = PySequence_GetItem(args, i);
2362 2362 bitmask x;
2363 2363 long val;
2364 2364
2365 2365 if (!PyInt_Check(obj)) {
2366 2366 PyErr_SetString(PyExc_TypeError,
2367 2367 "arguments must all be ints");
2368 2368 Py_DECREF(obj);
2369 2369 goto bail;
2370 2370 }
2371 2371 val = PyInt_AsLong(obj);
2372 2372 Py_DECREF(obj);
2373 2373 if (val == -1) {
2374 2374 ret = PyList_New(0);
2375 2375 goto done;
2376 2376 }
2377 2377 if (val < 0 || val >= len) {
2378 2378 PyErr_SetString(PyExc_IndexError, "index out of range");
2379 2379 goto bail;
2380 2380 }
2381 2381 /* this cheesy bloom filter lets us avoid some more
2382 2382 * expensive duplicate checks in the common set-is-disjoint
2383 2383 * case */
2384 2384 x = 1ull << (val & 0x3f);
2385 2385 if (repeat & x) {
2386 2386 int k;
2387 2387 for (k = 0; k < revcount; k++) {
2388 2388 if (val == revs[k])
2389 2389 goto duplicate;
2390 2390 }
2391 2391 } else
2392 2392 repeat |= x;
2393 2393 if (revcount >= capacity) {
2394 2394 PyErr_Format(PyExc_OverflowError,
2395 2395 "bitset size (%d) > capacity (%d)",
2396 2396 revcount, capacity);
2397 2397 goto bail;
2398 2398 }
2399 2399 revs[revcount++] = (int)val;
2400 2400 duplicate:;
2401 2401 }
2402 2402
2403 2403 if (revcount == 0) {
2404 2404 ret = PyList_New(0);
2405 2405 goto done;
2406 2406 }
2407 2407 if (revcount == 1) {
2408 2408 PyObject *obj;
2409 2409 ret = PyList_New(1);
2410 2410 if (ret == NULL)
2411 2411 goto bail;
2412 2412 obj = PyInt_FromLong(revs[0]);
2413 2413 if (obj == NULL)
2414 2414 goto bail;
2415 2415 PyList_SET_ITEM(ret, 0, obj);
2416 2416 goto done;
2417 2417 }
2418 2418
2419 2419 ret = find_gca_candidates(self, revs, revcount);
2420 2420 if (ret == NULL)
2421 2421 goto bail;
2422 2422
2423 2423 done:
2424 2424 PyMem_Free(revs);
2425 2425 return ret;
2426 2426
2427 2427 bail:
2428 2428 PyMem_Free(revs);
2429 2429 Py_XDECREF(ret);
2430 2430 return NULL;
2431 2431 }
2432 2432
2433 2433 /*
2434 2434 * Given a (possibly overlapping) set of revs, return the greatest
2435 2435 * common ancestors: those with the longest path to the root.
2436 2436 */
2437 2437 static PyObject *index_ancestors(indexObject *self, PyObject *args)
2438 2438 {
2439 2439 PyObject *ret;
2440 2440 PyObject *gca = index_commonancestorsheads(self, args);
2441 2441 if (gca == NULL)
2442 2442 return NULL;
2443 2443
2444 2444 if (PyList_GET_SIZE(gca) <= 1) {
2445 2445 return gca;
2446 2446 }
2447 2447
2448 2448 ret = find_deepest(self, gca);
2449 2449 Py_DECREF(gca);
2450 2450 return ret;
2451 2451 }
2452 2452
2453 2453 /*
2454 2454 * Invalidate any trie entries introduced by added revs.
2455 2455 */
2456 2456 static void index_invalidate_added(indexObject *self, Py_ssize_t start)
2457 2457 {
2458 2458 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
2459 2459
2460 2460 for (i = start; i < len; i++) {
2461 2461 PyObject *tuple = PyList_GET_ITEM(self->added, i);
2462 2462 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
2463 2463
2464 2464 nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
2465 2465 }
2466 2466
2467 2467 if (start == 0)
2468 2468 Py_CLEAR(self->added);
2469 2469 }
2470 2470
2471 2471 /*
2472 2472 * Delete a numeric range of revs, which must be at the end of the
2473 2473 * range.
2474 2474 */
2475 2475 static int index_slice_del(indexObject *self, PyObject *item)
2476 2476 {
2477 2477 Py_ssize_t start, stop, step, slicelength;
2478 2478 Py_ssize_t length = index_length(self) + 1;
2479 2479 int ret = 0;
2480 2480
2481 2481 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
2482 2482 #ifdef IS_PY3K
2483 2483 if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
2484 2484 &slicelength) < 0)
2485 2485 #else
2486 2486 if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
2487 2487 &step, &slicelength) < 0)
2488 2488 #endif
2489 2489 return -1;
2490 2490
2491 2491 if (slicelength <= 0)
2492 2492 return 0;
2493 2493
2494 2494 if ((step < 0 && start < stop) || (step > 0 && start > stop))
2495 2495 stop = start;
2496 2496
2497 2497 if (step < 0) {
2498 2498 stop = start + 1;
2499 2499 start = stop + step * (slicelength - 1) - 1;
2500 2500 step = -step;
2501 2501 }
2502 2502
2503 2503 if (step != 1) {
2504 2504 PyErr_SetString(PyExc_ValueError,
2505 2505 "revlog index delete requires step size of 1");
2506 2506 return -1;
2507 2507 }
2508 2508
2509 2509 if (stop != length - 1) {
2510 2510 PyErr_SetString(PyExc_IndexError,
2511 2511 "revlog index deletion indices are invalid");
2512 2512 return -1;
2513 2513 }
2514 2514
2515 2515 if (start < self->length) {
2516 2516 if (self->ntinitialized) {
2517 2517 Py_ssize_t i;
2518 2518
2519 2519 for (i = start; i < self->length; i++) {
2520 2520 const char *node = index_node_existing(self, i);
2521 2521 if (node == NULL)
2522 2522 return -1;
2523 2523
2524 2524 nt_delete_node(&self->nt, node);
2525 2525 }
2526 2526 if (self->added)
2527 2527 index_invalidate_added(self, 0);
2528 2528 if (self->ntrev > start)
2529 2529 self->ntrev = (int)start;
2530 2530 } else if (self->added) {
2531 2531 Py_CLEAR(self->added);
2532 2532 }
2533 2533
2534 2534 self->length = start;
2535 2535 if (start < self->raw_length) {
2536 2536 if (self->cache) {
2537 2537 Py_ssize_t i;
2538 2538 for (i = start; i < self->raw_length; i++)
2539 2539 Py_CLEAR(self->cache[i]);
2540 2540 }
2541 2541 self->raw_length = start;
2542 2542 }
2543 2543 goto done;
2544 2544 }
2545 2545
2546 2546 if (self->ntinitialized) {
2547 2547 index_invalidate_added(self, start - self->length);
2548 2548 if (self->ntrev > start)
2549 2549 self->ntrev = (int)start;
2550 2550 }
2551 2551 if (self->added)
2552 2552 ret = PyList_SetSlice(self->added, start - self->length,
2553 2553 PyList_GET_SIZE(self->added), NULL);
2554 2554 done:
2555 2555 Py_CLEAR(self->headrevs);
2556 2556 return ret;
2557 2557 }
2558 2558
2559 2559 /*
2560 2560 * Supported ops:
2561 2561 *
2562 2562 * slice deletion
2563 2563 * string assignment (extend node->rev mapping)
2564 2564 * string deletion (shrink node->rev mapping)
2565 2565 */
2566 2566 static int index_assign_subscript(indexObject *self, PyObject *item,
2567 2567 PyObject *value)
2568 2568 {
2569 2569 char *node;
2570 2570 long rev;
2571 2571
2572 2572 if (PySlice_Check(item) && value == NULL)
2573 2573 return index_slice_del(self, item);
2574 2574
2575 2575 if (node_check(item, &node) == -1)
2576 2576 return -1;
2577 2577
2578 2578 if (value == NULL)
2579 2579 return self->ntinitialized ? nt_delete_node(&self->nt, node)
2580 2580 : 0;
2581 2581 rev = PyInt_AsLong(value);
2582 2582 if (rev > INT_MAX || rev < 0) {
2583 2583 if (!PyErr_Occurred())
2584 2584 PyErr_SetString(PyExc_ValueError, "rev out of range");
2585 2585 return -1;
2586 2586 }
2587 2587
2588 2588 if (index_init_nt(self) == -1)
2589 2589 return -1;
2590 2590 return nt_insert(&self->nt, node, (int)rev);
2591 2591 }
2592 2592
2593 2593 /*
2594 2594 * Find all RevlogNG entries in an index that has inline data. Update
2595 2595 * the optional "offsets" table with those entries.
2596 2596 */
2597 2597 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
2598 2598 {
2599 2599 const char *data = (const char *)self->buf.buf;
2600 2600 Py_ssize_t pos = 0;
2601 2601 Py_ssize_t end = self->buf.len;
2602 2602 long incr = v1_hdrsize;
2603 2603 Py_ssize_t len = 0;
2604 2604
2605 2605 while (pos + v1_hdrsize <= end && pos >= 0) {
2606 2606 uint32_t comp_len;
2607 2607 /* 3rd element of header is length of compressed inline data */
2608 2608 comp_len = getbe32(data + pos + 8);
2609 2609 incr = v1_hdrsize + comp_len;
2610 2610 if (offsets)
2611 2611 offsets[len] = data + pos;
2612 2612 len++;
2613 2613 pos += incr;
2614 2614 }
2615 2615
2616 2616 if (pos != end) {
2617 2617 if (!PyErr_Occurred())
2618 2618 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2619 2619 return -1;
2620 2620 }
2621 2621
2622 2622 return len;
2623 2623 }
2624 2624
2625 2625 static int index_init(indexObject *self, PyObject *args)
2626 2626 {
2627 2627 PyObject *data_obj, *inlined_obj;
2628 2628 Py_ssize_t size;
2629 2629
2630 2630 /* Initialize before argument-checking to avoid index_dealloc() crash.
2631 2631 */
2632 2632 self->raw_length = 0;
2633 2633 self->added = NULL;
2634 2634 self->cache = NULL;
2635 2635 self->data = NULL;
2636 2636 memset(&self->buf, 0, sizeof(self->buf));
2637 2637 self->headrevs = NULL;
2638 2638 self->filteredrevs = Py_None;
2639 2639 Py_INCREF(Py_None);
2640 2640 self->ntinitialized = 0;
2641 2641 self->offsets = NULL;
2642 2642
2643 2643 if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
2644 2644 return -1;
2645 2645 if (!PyObject_CheckBuffer(data_obj)) {
2646 2646 PyErr_SetString(PyExc_TypeError,
2647 2647 "data does not support buffer interface");
2648 2648 return -1;
2649 2649 }
2650 2650
2651 2651 if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
2652 2652 return -1;
2653 2653 size = self->buf.len;
2654 2654
2655 2655 self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
2656 2656 self->data = data_obj;
2657 2657
2658 2658 self->ntlookups = self->ntmisses = 0;
2659 2659 self->ntrev = -1;
2660 2660 Py_INCREF(self->data);
2661 2661
2662 2662 if (self->inlined) {
2663 2663 Py_ssize_t len = inline_scan(self, NULL);
2664 2664 if (len == -1)
2665 2665 goto bail;
2666 2666 self->raw_length = len;
2667 2667 self->length = len;
2668 2668 } else {
2669 2669 if (size % v1_hdrsize) {
2670 2670 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2671 2671 goto bail;
2672 2672 }
2673 2673 self->raw_length = size / v1_hdrsize;
2674 2674 self->length = self->raw_length;
2675 2675 }
2676 2676
2677 2677 return 0;
2678 2678 bail:
2679 2679 return -1;
2680 2680 }
2681 2681
2682 2682 static PyObject *index_nodemap(indexObject *self)
2683 2683 {
2684 2684 Py_INCREF(self);
2685 2685 return (PyObject *)self;
2686 2686 }
2687 2687
2688 2688 static void _index_clearcaches(indexObject *self)
2689 2689 {
2690 2690 if (self->cache) {
2691 2691 Py_ssize_t i;
2692 2692
2693 2693 for (i = 0; i < self->raw_length; i++)
2694 2694 Py_CLEAR(self->cache[i]);
2695 2695 free(self->cache);
2696 2696 self->cache = NULL;
2697 2697 }
2698 2698 if (self->offsets) {
2699 2699 PyMem_Free((void *)self->offsets);
2700 2700 self->offsets = NULL;
2701 2701 }
2702 2702 if (self->ntinitialized) {
2703 2703 nt_dealloc(&self->nt);
2704 2704 }
2705 2705 self->ntinitialized = 0;
2706 2706 Py_CLEAR(self->headrevs);
2707 2707 }
2708 2708
2709 2709 static PyObject *index_clearcaches(indexObject *self)
2710 2710 {
2711 2711 _index_clearcaches(self);
2712 2712 self->ntrev = -1;
2713 2713 self->ntlookups = self->ntmisses = 0;
2714 2714 Py_RETURN_NONE;
2715 2715 }
2716 2716
2717 2717 static void index_dealloc(indexObject *self)
2718 2718 {
2719 2719 _index_clearcaches(self);
2720 2720 Py_XDECREF(self->filteredrevs);
2721 2721 if (self->buf.buf) {
2722 2722 PyBuffer_Release(&self->buf);
2723 2723 memset(&self->buf, 0, sizeof(self->buf));
2724 2724 }
2725 2725 Py_XDECREF(self->data);
2726 2726 Py_XDECREF(self->added);
2727 2727 PyObject_Del(self);
2728 2728 }
2729 2729
2730 2730 static PySequenceMethods index_sequence_methods = {
2731 2731 (lenfunc)index_length, /* sq_length */
2732 2732 0, /* sq_concat */
2733 2733 0, /* sq_repeat */
2734 2734 (ssizeargfunc)index_get, /* sq_item */
2735 2735 0, /* sq_slice */
2736 2736 0, /* sq_ass_item */
2737 2737 0, /* sq_ass_slice */
2738 2738 (objobjproc)index_contains, /* sq_contains */
2739 2739 };
2740 2740
2741 2741 static PyMappingMethods index_mapping_methods = {
2742 2742 (lenfunc)index_length, /* mp_length */
2743 2743 (binaryfunc)index_getitem, /* mp_subscript */
2744 2744 (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
2745 2745 };
2746 2746
2747 2747 static PyMethodDef index_methods[] = {
2748 2748 {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
2749 2749 "return the gca set of the given revs"},
2750 2750 {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
2751 2751 METH_VARARGS,
2752 2752 "return the heads of the common ancestors of the given revs"},
2753 2753 {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
2754 2754 "clear the index caches"},
2755 2755 {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
2756 2756 {"get_rev", (PyCFunction)index_m_get, METH_VARARGS,
2757 2757 "return `rev` associated with a node or None"},
2758 2758 {"has_node", (PyCFunction)index_m_has_node, METH_O,
2759 2759 "return True if the node exist in the index"},
2760 2760 {"rev", (PyCFunction)index_m_rev, METH_O,
2761 2761 "return `rev` associated with a node or raise RevlogError"},
2762 2762 {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
2763 2763 "compute phases"},
2764 2764 {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
2765 2765 "reachableroots"},
2766 2766 {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
2767 2767 "get head revisions"}, /* Can do filtering since 3.2 */
2768 2768 {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
2769 2769 "get filtered head revisions"}, /* Can always do filtering */
2770 2770 {"issnapshot", (PyCFunction)index_issnapshot, METH_O,
2771 2771 "True if the object is a snapshot"},
2772 2772 {"findsnapshots", (PyCFunction)index_findsnapshots, METH_VARARGS,
2773 2773 "Gather snapshot data in a cache dict"},
2774 2774 {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
2775 2775 "determine revisions with deltas to reconstruct fulltext"},
2776 2776 {"slicechunktodensity", (PyCFunction)index_slicechunktodensity,
2777 2777 METH_VARARGS, "determine revisions with deltas to reconstruct fulltext"},
2778 2778 {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
2779 2779 {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
2780 2780 "match a potentially ambiguous node ID"},
2781 2781 {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
2782 2782 "find length of shortest hex nodeid of a binary ID"},
2783 2783 {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
2784 2784 {NULL} /* Sentinel */
2785 2785 };
2786 2786
2787 2787 static PyGetSetDef index_getset[] = {
2788 2788 {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
2789 2789 {NULL} /* Sentinel */
2790 2790 };
2791 2791
2792 2792 PyTypeObject HgRevlogIndex_Type = {
2793 2793 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2794 2794 "parsers.index", /* tp_name */
2795 2795 sizeof(indexObject), /* tp_basicsize */
2796 2796 0, /* tp_itemsize */
2797 2797 (destructor)index_dealloc, /* tp_dealloc */
2798 2798 0, /* tp_print */
2799 2799 0, /* tp_getattr */
2800 2800 0, /* tp_setattr */
2801 2801 0, /* tp_compare */
2802 2802 0, /* tp_repr */
2803 2803 0, /* tp_as_number */
2804 2804 &index_sequence_methods, /* tp_as_sequence */
2805 2805 &index_mapping_methods, /* tp_as_mapping */
2806 2806 0, /* tp_hash */
2807 2807 0, /* tp_call */
2808 2808 0, /* tp_str */
2809 2809 0, /* tp_getattro */
2810 2810 0, /* tp_setattro */
2811 2811 0, /* tp_as_buffer */
2812 2812 Py_TPFLAGS_DEFAULT, /* tp_flags */
2813 2813 "revlog index", /* tp_doc */
2814 2814 0, /* tp_traverse */
2815 2815 0, /* tp_clear */
2816 2816 0, /* tp_richcompare */
2817 2817 0, /* tp_weaklistoffset */
2818 2818 0, /* tp_iter */
2819 2819 0, /* tp_iternext */
2820 2820 index_methods, /* tp_methods */
2821 2821 0, /* tp_members */
2822 2822 index_getset, /* tp_getset */
2823 2823 0, /* tp_base */
2824 2824 0, /* tp_dict */
2825 2825 0, /* tp_descr_get */
2826 2826 0, /* tp_descr_set */
2827 2827 0, /* tp_dictoffset */
2828 2828 (initproc)index_init, /* tp_init */
2829 2829 0, /* tp_alloc */
2830 2830 };
2831 2831
2832 2832 /*
2833 2833 * returns a tuple of the form (index, index, cache) with elements as
2834 2834 * follows:
2835 2835 *
2836 2836 * index: an index object that lazily parses RevlogNG records
2837 2837 * cache: if data is inlined, a tuple (0, index_file_content), else None
2838 2838 * index_file_content could be a string, or a buffer
2839 2839 *
2840 2840 * added complications are for backwards compatibility
2841 2841 */
2842 2842 PyObject *parse_index2(PyObject *self, PyObject *args)
2843 2843 {
2844 2844 PyObject *tuple = NULL, *cache = NULL;
2845 2845 indexObject *idx;
2846 2846 int ret;
2847 2847
2848 2848 idx = PyObject_New(indexObject, &HgRevlogIndex_Type);
2849 2849 if (idx == NULL)
2850 2850 goto bail;
2851 2851
2852 2852 ret = index_init(idx, args);
2853 2853 if (ret == -1)
2854 2854 goto bail;
2855 2855
2856 2856 if (idx->inlined) {
2857 2857 cache = Py_BuildValue("iO", 0, idx->data);
2858 2858 if (cache == NULL)
2859 2859 goto bail;
2860 2860 } else {
2861 2861 cache = Py_None;
2862 2862 Py_INCREF(cache);
2863 2863 }
2864 2864
2865 2865 tuple = Py_BuildValue("NN", idx, cache);
2866 2866 if (!tuple)
2867 2867 goto bail;
2868 2868 return tuple;
2869 2869
2870 2870 bail:
2871 2871 Py_XDECREF(idx);
2872 2872 Py_XDECREF(cache);
2873 2873 Py_XDECREF(tuple);
2874 2874 return NULL;
2875 2875 }
2876 2876
2877 2877 #ifdef WITH_RUST
2878 2878
2879 2879 /* rustlazyancestors: iteration over ancestors implemented in Rust
2880 2880 *
2881 2881 * This class holds a reference to an index and to the Rust iterator.
2882 2882 */
2883 2883 typedef struct rustlazyancestorsObjectStruct rustlazyancestorsObject;
2884 2884
2885 2885 struct rustlazyancestorsObjectStruct {
2886 2886 PyObject_HEAD
2887 2887 /* Type-specific fields go here. */
2888 2888 indexObject *index; /* Ref kept to avoid GC'ing the index */
2889 2889 void *iter; /* Rust iterator */
2890 2890 };
2891 2891
2892 2892 /* FFI exposed from Rust code */
2893 2893 rustlazyancestorsObject *rustlazyancestors_init(indexObject *index,
2894 2894 /* intrevs vector */
2895 2895 Py_ssize_t initrevslen,
2896 2896 long *initrevs, long stoprev,
2897 2897 int inclusive);
2898 2898 void rustlazyancestors_drop(rustlazyancestorsObject *self);
2899 2899 int rustlazyancestors_next(rustlazyancestorsObject *self);
2900 2900 int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev);
2901 2901
2902 2902 /* CPython instance methods */
2903 2903 static int rustla_init(rustlazyancestorsObject *self, PyObject *args)
2904 2904 {
2905 2905 PyObject *initrevsarg = NULL;
2906 2906 PyObject *inclusivearg = NULL;
2907 2907 long stoprev = 0;
2908 2908 long *initrevs = NULL;
2909 2909 int inclusive = 0;
2910 2910 Py_ssize_t i;
2911 2911
2912 2912 indexObject *index;
2913 2913 if (!PyArg_ParseTuple(args, "O!O!lO!", &HgRevlogIndex_Type, &index,
2914 2914 &PyList_Type, &initrevsarg, &stoprev,
2915 2915 &PyBool_Type, &inclusivearg))
2916 2916 return -1;
2917 2917
2918 2918 Py_INCREF(index);
2919 2919 self->index = index;
2920 2920
2921 2921 if (inclusivearg == Py_True)
2922 2922 inclusive = 1;
2923 2923
2924 2924 Py_ssize_t linit = PyList_GET_SIZE(initrevsarg);
2925 2925
2926 2926 initrevs = (long *)calloc(linit, sizeof(long));
2927 2927
2928 2928 if (initrevs == NULL) {
2929 2929 PyErr_NoMemory();
2930 2930 goto bail;
2931 2931 }
2932 2932
2933 2933 for (i = 0; i < linit; i++) {
2934 2934 initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i));
2935 2935 }
2936 2936 if (PyErr_Occurred())
2937 2937 goto bail;
2938 2938
2939 2939 self->iter =
2940 2940 rustlazyancestors_init(index, linit, initrevs, stoprev, inclusive);
2941 2941 if (self->iter == NULL) {
2942 2942 /* if this is because of GraphError::ParentOutOfRange
2943 2943 * HgRevlogIndex_GetParents() has already set the proper
2944 2944 * exception */
2945 2945 goto bail;
2946 2946 }
2947 2947
2948 2948 free(initrevs);
2949 2949 return 0;
2950 2950
2951 2951 bail:
2952 2952 free(initrevs);
2953 2953 return -1;
2954 2954 };
2955 2955
2956 2956 static void rustla_dealloc(rustlazyancestorsObject *self)
2957 2957 {
2958 2958 Py_XDECREF(self->index);
2959 2959 if (self->iter != NULL) { /* can happen if rustla_init failed */
2960 2960 rustlazyancestors_drop(self->iter);
2961 2961 }
2962 2962 PyObject_Del(self);
2963 2963 }
2964 2964
2965 2965 static PyObject *rustla_next(rustlazyancestorsObject *self)
2966 2966 {
2967 2967 int res = rustlazyancestors_next(self->iter);
2968 2968 if (res == -1) {
2969 2969 /* Setting an explicit exception seems unnecessary
2970 2970 * as examples from Python source code (Objects/rangeobjets.c
2971 2971 * and Modules/_io/stringio.c) seem to demonstrate.
2972 2972 */
2973 2973 return NULL;
2974 2974 }
2975 2975 return PyInt_FromLong(res);
2976 2976 }
2977 2977
2978 2978 static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev)
2979 2979 {
2980 2980 long lrev;
2981 2981 if (!pylong_to_long(rev, &lrev)) {
2982 2982 PyErr_Clear();
2983 2983 return 0;
2984 2984 }
2985 2985 return rustlazyancestors_contains(self->iter, lrev);
2986 2986 }
2987 2987
2988 2988 static PySequenceMethods rustla_sequence_methods = {
2989 2989 0, /* sq_length */
2990 2990 0, /* sq_concat */
2991 2991 0, /* sq_repeat */
2992 2992 0, /* sq_item */
2993 2993 0, /* sq_slice */
2994 2994 0, /* sq_ass_item */
2995 2995 0, /* sq_ass_slice */
2996 2996 (objobjproc)rustla_contains, /* sq_contains */
2997 2997 };
2998 2998
2999 2999 static PyTypeObject rustlazyancestorsType = {
3000 3000 PyVarObject_HEAD_INIT(NULL, 0) /* header */
3001 3001 "parsers.rustlazyancestors", /* tp_name */
3002 3002 sizeof(rustlazyancestorsObject), /* tp_basicsize */
3003 3003 0, /* tp_itemsize */
3004 3004 (destructor)rustla_dealloc, /* tp_dealloc */
3005 3005 0, /* tp_print */
3006 3006 0, /* tp_getattr */
3007 3007 0, /* tp_setattr */
3008 3008 0, /* tp_compare */
3009 3009 0, /* tp_repr */
3010 3010 0, /* tp_as_number */
3011 3011 &rustla_sequence_methods, /* tp_as_sequence */
3012 3012 0, /* tp_as_mapping */
3013 3013 0, /* tp_hash */
3014 3014 0, /* tp_call */
3015 3015 0, /* tp_str */
3016 3016 0, /* tp_getattro */
3017 3017 0, /* tp_setattro */
3018 3018 0, /* tp_as_buffer */
3019 3019 Py_TPFLAGS_DEFAULT, /* tp_flags */
3020 3020 "Iterator over ancestors, implemented in Rust", /* tp_doc */
3021 3021 0, /* tp_traverse */
3022 3022 0, /* tp_clear */
3023 3023 0, /* tp_richcompare */
3024 3024 0, /* tp_weaklistoffset */
3025 3025 0, /* tp_iter */
3026 3026 (iternextfunc)rustla_next, /* tp_iternext */
3027 3027 0, /* tp_methods */
3028 3028 0, /* tp_members */
3029 3029 0, /* tp_getset */
3030 3030 0, /* tp_base */
3031 3031 0, /* tp_dict */
3032 3032 0, /* tp_descr_get */
3033 3033 0, /* tp_descr_set */
3034 3034 0, /* tp_dictoffset */
3035 3035 (initproc)rustla_init, /* tp_init */
3036 3036 0, /* tp_alloc */
3037 3037 };
3038 3038 #endif /* WITH_RUST */
3039 3039
3040 3040 static Revlog_CAPI CAPI = {
3041 3041 /* increment the abi_version field upon each change in the Revlog_CAPI
3042 3042 struct or in the ABI of the listed functions */
3043 3043 1,
3044 3044 HgRevlogIndex_GetParents,
3045 3045 };
3046 3046
3047 3047 void revlog_module_init(PyObject *mod)
3048 3048 {
3049 3049 PyObject *caps = NULL;
3050 3050 HgRevlogIndex_Type.tp_new = PyType_GenericNew;
3051 3051 if (PyType_Ready(&HgRevlogIndex_Type) < 0)
3052 3052 return;
3053 3053 Py_INCREF(&HgRevlogIndex_Type);
3054 3054 PyModule_AddObject(mod, "index", (PyObject *)&HgRevlogIndex_Type);
3055 3055
3056 3056 nodetreeType.tp_new = PyType_GenericNew;
3057 3057 if (PyType_Ready(&nodetreeType) < 0)
3058 3058 return;
3059 3059 Py_INCREF(&nodetreeType);
3060 3060 PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);
3061 3061
3062 3062 if (!nullentry) {
3063 3063 nullentry =
3064 3064 Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0, -1,
3065 3065 -1, -1, -1, nullid, (Py_ssize_t)20);
3066 3066 }
3067 3067 if (nullentry)
3068 3068 PyObject_GC_UnTrack(nullentry);
3069 3069
3070 3070 caps = PyCapsule_New(&CAPI, "mercurial.cext.parsers.revlog_CAPI", NULL);
3071 3071 if (caps != NULL)
3072 3072 PyModule_AddObject(mod, "revlog_CAPI", caps);
3073 3073
3074 3074 #ifdef WITH_RUST
3075 3075 rustlazyancestorsType.tp_new = PyType_GenericNew;
3076 3076 if (PyType_Ready(&rustlazyancestorsType) < 0)
3077 3077 return;
3078 3078 Py_INCREF(&rustlazyancestorsType);
3079 3079 PyModule_AddObject(mod, "rustlazyancestors",
3080 3080 (PyObject *)&rustlazyancestorsType);
3081 3081 #endif
3082 3082 }
@@ -1,19 +1,17 b''
1 1 /*
2 2 revlog.h - efficient revlog parsing
3 3
4 4 This software may be used and distributed according to the terms of
5 5 the GNU General Public License, incorporated herein by reference.
6 6 */
7 7
8 8 #ifndef _HG_REVLOG_H_
9 9 #define _HG_REVLOG_H_
10 10
11 11 #include <Python.h>
12 12
13 13 extern PyTypeObject HgRevlogIndex_Type;
14 14
15 15 #define HgRevlogIndex_Check(op) PyObject_TypeCheck(op, &HgRevlogIndex_Type)
16 16
17 int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps);
18
19 17 #endif /* _HG_REVLOG_H_ */
General Comments 0
You need to be logged in to leave comments. Login now