##// END OF EJS Templates
cext: move variable declaration to the top of the block for C89 support...
Matt Harbison -
r45070:3122058d stable
parent child Browse files
Show More
@@ -1,3085 +1,3086 b''
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #define PY_SSIZE_T_CLEAN
11 11 #include <Python.h>
12 12 #include <assert.h>
13 13 #include <ctype.h>
14 14 #include <limits.h>
15 15 #include <stddef.h>
16 16 #include <stdlib.h>
17 17 #include <string.h>
18 18
19 19 #include "bitmanipulation.h"
20 20 #include "charencode.h"
21 21 #include "revlog.h"
22 22 #include "util.h"
23 23
24 24 #ifdef IS_PY3K
25 25 /* The mapping of Python types is meant to be temporary to get Python
26 26 * 3 to compile. We should remove this once Python 3 support is fully
27 27 * supported and proper types are used in the extensions themselves. */
28 28 #define PyInt_Check PyLong_Check
29 29 #define PyInt_FromLong PyLong_FromLong
30 30 #define PyInt_FromSsize_t PyLong_FromSsize_t
31 31 #define PyInt_AsLong PyLong_AsLong
32 32 #endif
33 33
/* Forward declaration; full definition below (struct indexObjectStruct). */
typedef struct indexObjectStruct indexObject;

/* One node of the base-16 trie: 16 child slots, one per nibble.
 * See the nodetree comment below for how slot values are encoded. */
typedef struct {
	int children[16];
} nodetreenode;

/* Function table shared with other C extensions.
 * abi_version guards layout compatibility; index_parents resolves the
 * parents of a revision (see HgRevlogIndex_GetParents below).
 * NOTE(review): presumably exported via a PyCapsule in module init —
 * confirm against the module setup code (not visible here). */
typedef struct {
	int abi_version;
	int (*index_parents)(PyObject *, int, int *);
} Revlog_CAPI;
44 44
/*
 * A base-16 trie for fast node->rev mapping.
 *
 * Each nodetreenode child slot is interpreted as follows:
 * Positive value is index of the next node in the trie
 * Negative value is a leaf: -(rev + 2)
 * Zero is empty
 */
typedef struct {
	indexObject *index;  /* back-pointer to the owning index */
	nodetreenode *nodes; /* contiguous pool of trie nodes */
	unsigned length;     /* # nodes in use */
	unsigned capacity;   /* # nodes allocated */
	int depth;           /* maximum depth of tree */
	int splits;          /* # splits performed */
} nodetree;

/* Standalone Python object wrapping a nodetree. */
typedef struct {
	PyObject_HEAD /* ; */
	nodetree nt;
} nodetreeObject;
65 65
/*
 * This class has two behaviors.
 *
 * When used in a list-like way (with integer keys), we decode an
 * entry in a RevlogNG index file on demand. We have limited support for
 * integer-keyed insert and delete, only at elements right before the
 * end.
 *
 * With string keys, we lazily perform a reverse mapping from node to
 * rev, using a base-16 trie.
 */
struct indexObjectStruct {
	PyObject_HEAD
	/* Type-specific fields go here. */
	PyObject *data;     /* raw bytes of index */
	Py_buffer buf;      /* buffer of data */
	PyObject **cache;   /* cached tuples, one slot per on-disk rev;
	                       allocated lazily by index_get */
	const char **offsets; /* populated on demand (inlined revlogs only,
	                         see index_deref) */
	Py_ssize_t raw_length; /* original number of elements */
	Py_ssize_t length;     /* current number of elements */
	PyObject *added;       /* populated on demand: list of tuples for
	                          revs appended in memory */
	PyObject *headrevs;    /* cache, invalidated on changes */
	PyObject *filteredrevs; /* filtered revs set */
	nodetree nt;        /* base-16 trie */
	int ntinitialized;  /* 0 or 1 */
	int ntrev;          /* last rev scanned */
	int ntlookups;      /* # lookups */
	int ntmisses;       /* # lookups that miss the cache */
	int inlined;        /* nonzero when data is interleaved with the
	                       index (inline revlog) */
};
96 96
97 97 static Py_ssize_t index_length(const indexObject *self)
98 98 {
99 99 if (self->added == NULL)
100 100 return self->length;
101 101 return self->length + PyList_GET_SIZE(self->added);
102 102 }
103 103
/* Shared singleton tuple returned for the nullrev pseudo-revision
 * (initialized elsewhere, presumably during module setup — confirm). */
static PyObject *nullentry = NULL;
/* 20 zero bytes: node id of the null revision. */
static const char nullid[20] = {0};
static const Py_ssize_t nullrev = -1;

static Py_ssize_t inline_scan(indexObject *self, const char **offsets);

/* Py_BuildValue format for an index entry tuple. The first field packs
 * a 64-bit offset+flags value: 'K' (unsigned long long) is required on
 * platforms where long is 32-bit, 'k' (unsigned long) suffices on
 * 64-bit longs. PY23 selects str vs bytes codes for Python 2/3. */
#if LONG_MAX == 0x7fffffffL
static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
#else
static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
#endif

/* A RevlogNG v1 index entry is 64 bytes long. */
static const long v1_hdrsize = 64;
118 118
119 119 static void raise_revlog_error(void)
120 120 {
121 121 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
122 122
123 123 mod = PyImport_ImportModule("mercurial.error");
124 124 if (mod == NULL) {
125 125 goto cleanup;
126 126 }
127 127
128 128 dict = PyModule_GetDict(mod);
129 129 if (dict == NULL) {
130 130 goto cleanup;
131 131 }
132 132 Py_INCREF(dict);
133 133
134 134 errclass = PyDict_GetItemString(dict, "RevlogError");
135 135 if (errclass == NULL) {
136 136 PyErr_SetString(PyExc_SystemError,
137 137 "could not find RevlogError");
138 138 goto cleanup;
139 139 }
140 140
141 141 /* value of exception is ignored by callers */
142 142 PyErr_SetString(errclass, "RevlogError");
143 143
144 144 cleanup:
145 145 Py_XDECREF(dict);
146 146 Py_XDECREF(mod);
147 147 }
148 148
149 149 /*
150 150 * Return a pointer to the beginning of a RevlogNG record.
151 151 */
152 152 static const char *index_deref(indexObject *self, Py_ssize_t pos)
153 153 {
154 154 if (self->inlined && pos > 0) {
155 155 if (self->offsets == NULL) {
156 Py_ssize_t ret;
156 157 self->offsets = PyMem_Malloc(self->raw_length *
157 158 sizeof(*self->offsets));
158 159 if (self->offsets == NULL)
159 160 return (const char *)PyErr_NoMemory();
160 Py_ssize_t ret = inline_scan(self, self->offsets);
161 ret = inline_scan(self, self->offsets);
161 162 if (ret == -1) {
162 163 return NULL;
163 164 };
164 165 }
165 166 return self->offsets[pos];
166 167 }
167 168
168 169 return (const char *)(self->buf.buf) + pos * v1_hdrsize;
169 170 }
170 171
/*
 * Get parents of the given rev.
 *
 * The specified rev must be valid and must not be nullrev. A returned
 * parent revision may be nullrev, but is guaranteed to be in valid range.
 */
static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
                                    int maxrev)
{
	if (rev >= self->length) {
		/* In-memory entry: parents live in tuple slots 5 and 6. */
		long tmp;
		PyObject *tuple =
		    PyList_GET_ITEM(self->added, rev - self->length);
		if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 5), &tmp)) {
			return -1;
		}
		ps[0] = (int)tmp;
		if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 6), &tmp)) {
			return -1;
		}
		ps[1] = (int)tmp;
	} else {
		/* On-disk entry: parents are big-endian ints at byte
		 * offsets 24 and 28 of the 64-byte record.
		 * NOTE(review): index_deref can return NULL (OOM / inline
		 * scan failure) and is dereferenced unchecked here —
		 * presumably callers only reach this after a successful
		 * deref; confirm. */
		const char *data = index_deref(self, rev);
		ps[0] = getbe32(data + 24);
		ps[1] = getbe32(data + 28);
	}
	/* If index file is corrupted, ps[] may point to invalid revisions. So
	 * there is a risk of buffer overflow to trust them unconditionally. */
	if (ps[0] < -1 || ps[0] > maxrev || ps[1] < -1 || ps[1] > maxrev) {
		PyErr_SetString(PyExc_ValueError, "parent out of range");
		return -1;
	}
	return 0;
}
205 206
206 207 /*
207 208 * Get parents of the given rev.
208 209 *
209 210 * If the specified rev is out of range, IndexError will be raised. If the
210 211 * revlog entry is corrupted, ValueError may be raised.
211 212 *
212 213 * Returns 0 on success or -1 on failure.
213 214 */
214 215 int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps)
215 216 {
216 217 int tiprev;
217 218 if (!op || !HgRevlogIndex_Check(op) || !ps) {
218 219 PyErr_BadInternalCall();
219 220 return -1;
220 221 }
221 222 tiprev = (int)index_length((indexObject *)op) - 1;
222 223 if (rev < -1 || rev > tiprev) {
223 224 PyErr_Format(PyExc_IndexError, "rev out of range: %d", rev);
224 225 return -1;
225 226 } else if (rev == -1) {
226 227 ps[0] = ps[1] = -1;
227 228 return 0;
228 229 } else {
229 230 return index_get_parents((indexObject *)op, rev, ps, tiprev);
230 231 }
231 232 }
232 233
233 234 static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
234 235 {
235 236 uint64_t offset;
236 237 if (rev == nullrev) {
237 238 return 0;
238 239 }
239 240 if (rev >= self->length) {
240 241 PyObject *tuple;
241 242 PyObject *pylong;
242 243 PY_LONG_LONG tmp;
243 244 tuple = PyList_GET_ITEM(self->added, rev - self->length);
244 245 pylong = PyTuple_GET_ITEM(tuple, 0);
245 246 tmp = PyLong_AsLongLong(pylong);
246 247 if (tmp == -1 && PyErr_Occurred()) {
247 248 return -1;
248 249 }
249 250 if (tmp < 0) {
250 251 PyErr_Format(PyExc_OverflowError,
251 252 "revlog entry size out of bound (%lld)",
252 253 (long long)tmp);
253 254 return -1;
254 255 }
255 256 offset = (uint64_t)tmp;
256 257 } else {
257 258 const char *data = index_deref(self, rev);
258 259 offset = getbe32(data + 4);
259 260 if (rev == 0) {
260 261 /* mask out version number for the first entry */
261 262 offset &= 0xFFFF;
262 263 } else {
263 264 uint32_t offset_high = getbe32(data);
264 265 offset |= ((uint64_t)offset_high) << 32;
265 266 }
266 267 }
267 268 return (int64_t)(offset >> 16);
268 269 }
269 270
270 271 static inline int index_get_length(indexObject *self, Py_ssize_t rev)
271 272 {
272 273 if (rev == nullrev) {
273 274 return 0;
274 275 }
275 276 if (rev >= self->length) {
276 277 PyObject *tuple;
277 278 PyObject *pylong;
278 279 long ret;
279 280 tuple = PyList_GET_ITEM(self->added, rev - self->length);
280 281 pylong = PyTuple_GET_ITEM(tuple, 1);
281 282 ret = PyInt_AsLong(pylong);
282 283 if (ret == -1 && PyErr_Occurred()) {
283 284 return -1;
284 285 }
285 286 if (ret < 0 || ret > (long)INT_MAX) {
286 287 PyErr_Format(PyExc_OverflowError,
287 288 "revlog entry size out of bound (%ld)",
288 289 ret);
289 290 return -1;
290 291 }
291 292 return (int)ret;
292 293 } else {
293 294 const char *data = index_deref(self, rev);
294 295 int tmp = (int)getbe32(data + 8);
295 296 if (tmp < 0) {
296 297 PyErr_Format(PyExc_OverflowError,
297 298 "revlog entry size out of bound (%d)",
298 299 tmp);
299 300 return -1;
300 301 }
301 302 return tmp;
302 303 }
303 304 }
304 305
/*
 * RevlogNG format (all in big endian, data may be inlined):
 *    6 bytes: offset
 *    2 bytes: flags
 *    4 bytes: compressed length
 *    4 bytes: uncompressed length
 *    4 bytes: base revision
 *    4 bytes: link revision
 *    4 bytes: parent 1 revision
 *    4 bytes: parent 2 revision
 *   32 bytes: nodeid (only 20 bytes used)
 */
static PyObject *index_get(indexObject *self, Py_ssize_t pos)
{
	uint64_t offset_flags;
	int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
	const char *c_node_id;
	const char *data;
	Py_ssize_t length = index_length(self);
	PyObject *entry;

	/* nullrev is served from the shared singleton tuple. */
	if (pos == nullrev) {
		Py_INCREF(nullentry);
		return nullentry;
	}

	if (pos < 0 || pos >= length) {
		PyErr_SetString(PyExc_IndexError, "revlog index out of range");
		return NULL;
	}

	/* Appended (in-memory) entries already are tuples. */
	if (pos >= self->length) {
		PyObject *obj;
		obj = PyList_GET_ITEM(self->added, pos - self->length);
		Py_INCREF(obj);
		return obj;
	}

	/* The tuple cache is allocated lazily: one slot per on-disk rev. */
	if (self->cache) {
		if (self->cache[pos]) {
			Py_INCREF(self->cache[pos]);
			return self->cache[pos];
		}
	} else {
		self->cache = calloc(self->raw_length, sizeof(PyObject *));
		if (self->cache == NULL)
			return PyErr_NoMemory();
	}

	data = index_deref(self, pos);
	if (data == NULL)
		return NULL;

	/* Decode the 64-byte record (layout documented above). */
	offset_flags = getbe32(data + 4);
	if (pos == 0) /* mask out version number for the first entry */
		offset_flags &= 0xFFFF;
	else {
		uint32_t offset_high = getbe32(data);
		offset_flags |= ((uint64_t)offset_high) << 32;
	}

	comp_len = getbe32(data + 8);
	uncomp_len = getbe32(data + 12);
	base_rev = getbe32(data + 16);
	link_rev = getbe32(data + 20);
	parent_1 = getbe32(data + 24);
	parent_2 = getbe32(data + 28);
	c_node_id = data + 32;

	entry = Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
	                      base_rev, link_rev, parent_1, parent_2, c_node_id,
	                      (Py_ssize_t)20);

	if (entry) {
		/* The extra INCREF is the cache's own reference; the
		 * caller gets the one from Py_BuildValue. */
		PyObject_GC_UnTrack(entry);
		Py_INCREF(entry);
	}

	self->cache[pos] = entry;

	return entry;
}
387 388
388 389 /*
389 390 * Return the 20-byte SHA of the node corresponding to the given rev.
390 391 */
391 392 static const char *index_node(indexObject *self, Py_ssize_t pos)
392 393 {
393 394 Py_ssize_t length = index_length(self);
394 395 const char *data;
395 396
396 397 if (pos == nullrev)
397 398 return nullid;
398 399
399 400 if (pos >= length)
400 401 return NULL;
401 402
402 403 if (pos >= self->length) {
403 404 PyObject *tuple, *str;
404 405 tuple = PyList_GET_ITEM(self->added, pos - self->length);
405 406 str = PyTuple_GetItem(tuple, 7);
406 407 return str ? PyBytes_AS_STRING(str) : NULL;
407 408 }
408 409
409 410 data = index_deref(self, pos);
410 411 return data ? data + 32 : NULL;
411 412 }
412 413
413 414 /*
414 415 * Return the 20-byte SHA of the node corresponding to the given rev. The
415 416 * rev is assumed to be existing. If not, an exception is set.
416 417 */
417 418 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
418 419 {
419 420 const char *node = index_node(self, pos);
420 421 if (node == NULL) {
421 422 PyErr_Format(PyExc_IndexError, "could not access rev %d",
422 423 (int)pos);
423 424 }
424 425 return node;
425 426 }
426 427
427 428 static int nt_insert(nodetree *self, const char *node, int rev);
428 429
429 430 static int node_check(PyObject *obj, char **node)
430 431 {
431 432 Py_ssize_t nodelen;
432 433 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
433 434 return -1;
434 435 if (nodelen == 20)
435 436 return 0;
436 437 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
437 438 return -1;
438 439 }
439 440
440 441 static PyObject *index_append(indexObject *self, PyObject *obj)
441 442 {
442 443 char *node;
443 444 Py_ssize_t len;
444 445
445 446 if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
446 447 PyErr_SetString(PyExc_TypeError, "8-tuple required");
447 448 return NULL;
448 449 }
449 450
450 451 if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
451 452 return NULL;
452 453
453 454 len = index_length(self);
454 455
455 456 if (self->added == NULL) {
456 457 self->added = PyList_New(0);
457 458 if (self->added == NULL)
458 459 return NULL;
459 460 }
460 461
461 462 if (PyList_Append(self->added, obj) == -1)
462 463 return NULL;
463 464
464 465 if (self->ntinitialized)
465 466 nt_insert(&self->nt, node, (int)len);
466 467
467 468 Py_CLEAR(self->headrevs);
468 469 Py_RETURN_NONE;
469 470 }
470 471
/*
 * Build a dict of diagnostic counters about this index (entry counts,
 * node-trie statistics). Returns a new dict, or NULL on error.
 */
static PyObject *index_stats(indexObject *self)
{
	PyObject *obj = PyDict_New();
	PyObject *s = NULL;
	PyObject *t = NULL;

	if (obj == NULL)
		return NULL;

/* Store one named counter in obj; s/t are cleared on success and
 * released in the bail path on failure. */
#define istat(__n, __d)                                                        \
	do {                                                                   \
		s = PyBytes_FromString(__d);                                   \
		t = PyInt_FromSsize_t(self->__n);                              \
		if (!s || !t)                                                  \
			goto bail;                                             \
		if (PyDict_SetItem(obj, s, t) == -1)                           \
			goto bail;                                             \
		Py_CLEAR(s);                                                   \
		Py_CLEAR(t);                                                   \
	} while (0)

	if (self->added) {
		Py_ssize_t len = PyList_GET_SIZE(self->added);
		s = PyBytes_FromString("index entries added");
		t = PyInt_FromSsize_t(len);
		if (!s || !t)
			goto bail;
		if (PyDict_SetItem(obj, s, t) == -1)
			goto bail;
		Py_CLEAR(s);
		Py_CLEAR(t);
	}

	if (self->raw_length != self->length)
		istat(raw_length, "revs on disk");
	istat(length, "revs in memory");
	istat(ntlookups, "node trie lookups");
	istat(ntmisses, "node trie misses");
	istat(ntrev, "node trie last rev scanned");
	if (self->ntinitialized) {
		istat(nt.capacity, "node trie capacity");
		istat(nt.depth, "node trie depth");
		istat(nt.length, "node trie count");
		istat(nt.splits, "node trie splits");
	}

#undef istat

	return obj;

bail:
	Py_XDECREF(obj);
	Py_XDECREF(s);
	Py_XDECREF(t);
	return NULL;
}
527 528
528 529 /*
529 530 * When we cache a list, we want to be sure the caller can't mutate
530 531 * the cached copy.
531 532 */
532 533 static PyObject *list_copy(PyObject *list)
533 534 {
534 535 Py_ssize_t len = PyList_GET_SIZE(list);
535 536 PyObject *newlist = PyList_New(len);
536 537 Py_ssize_t i;
537 538
538 539 if (newlist == NULL)
539 540 return NULL;
540 541
541 542 for (i = 0; i < len; i++) {
542 543 PyObject *obj = PyList_GET_ITEM(list, i);
543 544 Py_INCREF(obj);
544 545 PyList_SET_ITEM(newlist, i, obj);
545 546 }
546 547
547 548 return newlist;
548 549 }
549 550
550 551 static int check_filter(PyObject *filter, Py_ssize_t arg)
551 552 {
552 553 if (filter) {
553 554 PyObject *arglist, *result;
554 555 int isfiltered;
555 556
556 557 arglist = Py_BuildValue("(n)", arg);
557 558 if (!arglist) {
558 559 return -1;
559 560 }
560 561
561 562 result = PyEval_CallObject(filter, arglist);
562 563 Py_DECREF(arglist);
563 564 if (!result) {
564 565 return -1;
565 566 }
566 567
567 568 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
568 569 * same as this function, so we can just return it directly.*/
569 570 isfiltered = PyObject_IsTrue(result);
570 571 Py_DECREF(result);
571 572 return isfiltered;
572 573 } else {
573 574 return 0;
574 575 }
575 576 }
576 577
577 578 static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
578 579 Py_ssize_t marker, char *phases)
579 580 {
580 581 PyObject *iter = NULL;
581 582 PyObject *iter_item = NULL;
582 583 Py_ssize_t min_idx = index_length(self) + 2;
583 584 long iter_item_long;
584 585
585 586 if (PyList_GET_SIZE(list) != 0) {
586 587 iter = PyObject_GetIter(list);
587 588 if (iter == NULL)
588 589 return -2;
589 590 while ((iter_item = PyIter_Next(iter))) {
590 591 if (!pylong_to_long(iter_item, &iter_item_long)) {
591 592 Py_DECREF(iter_item);
592 593 return -2;
593 594 }
594 595 Py_DECREF(iter_item);
595 596 if (iter_item_long < min_idx)
596 597 min_idx = iter_item_long;
597 598 phases[iter_item_long] = (char)marker;
598 599 }
599 600 Py_DECREF(iter);
600 601 }
601 602
602 603 return min_idx;
603 604 }
604 605
605 606 static inline void set_phase_from_parents(char *phases, int parent_1,
606 607 int parent_2, Py_ssize_t i)
607 608 {
608 609 if (parent_1 >= 0 && phases[parent_1] > phases[i])
609 610 phases[i] = phases[parent_1];
610 611 if (parent_2 >= 0 && phases[parent_2] > phases[i])
611 612 phases[i] = phases[parent_2];
612 613 }
613 614
/*
 * reachableroots2(minroot, heads, roots, includepath) -> list of revs
 *
 * Walk the ancestors of 'heads' (never descending below 'minroot') and
 * return the members of 'roots' that are reachable. When includepath is
 * True, additionally return every rev that lies on a path between a
 * reachable root and a head. Returns a new list, or NULL with an
 * exception set.
 */
static PyObject *reachableroots2(indexObject *self, PyObject *args)
{

	/* Input */
	long minroot;
	PyObject *includepatharg = NULL;
	int includepath = 0;
	/* heads and roots are lists */
	PyObject *heads = NULL;
	PyObject *roots = NULL;
	PyObject *reachable = NULL;

	PyObject *val;
	Py_ssize_t len = index_length(self);
	long revnum;
	Py_ssize_t k;
	Py_ssize_t i;
	Py_ssize_t l;
	int r;
	int parents[2];

	/* Internal data structure:
	 * tovisit: array of length len+1 (all revs + nullrev), filled upto
	 * lentovisit
	 *
	 * revstates: array of length len+1 (all revs + nullrev) */
	int *tovisit = NULL;
	long lentovisit = 0;
	enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
	char *revstates = NULL;

	/* Get arguments */
	if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
	                      &PyList_Type, &roots, &PyBool_Type,
	                      &includepatharg))
		goto bail;

	if (includepatharg == Py_True)
		includepath = 1;

	/* Initialize return set */
	reachable = PyList_New(0);
	if (reachable == NULL)
		goto bail;

	/* Initialize internal datastructures */
	tovisit = (int *)malloc((len + 1) * sizeof(int));
	if (tovisit == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	revstates = (char *)calloc(len + 1, 1);
	if (revstates == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* Mark the roots. revstates is indexed by rev+1 so that nullrev
	 * (-1) maps to slot 0. */
	l = PyList_GET_SIZE(roots);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		/* If root is out of range, e.g. wdir(), it must be unreachable
		 * from heads. So we can just ignore it. */
		if (revnum + 1 < 0 || revnum + 1 >= len + 1)
			continue;
		revstates[revnum + 1] |= RS_ROOT;
	}

	/* Populate tovisit with all the heads */
	l = PyList_GET_SIZE(heads);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
			PyErr_SetString(PyExc_IndexError, "head out of range");
			goto bail;
		}
		if (!(revstates[revnum + 1] & RS_SEEN)) {
			tovisit[lentovisit++] = (int)revnum;
			revstates[revnum + 1] |= RS_SEEN;
		}
	}

	/* Visit the tovisit list and find the reachable roots */
	k = 0;
	while (k < lentovisit) {
		/* Add the node to reachable if it is a root*/
		revnum = tovisit[k++];
		if (revstates[revnum + 1] & RS_ROOT) {
			revstates[revnum + 1] |= RS_REACHABLE;
			val = PyInt_FromLong(revnum);
			if (val == NULL)
				goto bail;
			r = PyList_Append(reachable, val);
			Py_DECREF(val);
			if (r < 0)
				goto bail;
			/* Without includepath we can stop at a root: its
			 * ancestors are not needed. */
			if (includepath == 0)
				continue;
		}

		/* Add its parents to the list of nodes to visit */
		if (revnum == nullrev)
			continue;
		r = index_get_parents(self, revnum, parents, (int)len - 1);
		if (r < 0)
			goto bail;
		for (i = 0; i < 2; i++) {
			if (!(revstates[parents[i] + 1] & RS_SEEN) &&
			    parents[i] >= minroot) {
				tovisit[lentovisit++] = parents[i];
				revstates[parents[i] + 1] |= RS_SEEN;
			}
		}
	}

	/* Find all the nodes in between the roots we found and the heads
	 * and add them to the reachable set */
	if (includepath == 1) {
		long minidx = minroot;
		if (minidx < 0)
			minidx = 0;
		/* Upward sweep: a seen rev whose parent is reachable is
		 * itself on a root-to-head path. */
		for (i = minidx; i < len; i++) {
			if (!(revstates[i + 1] & RS_SEEN))
				continue;
			r = index_get_parents(self, i, parents, (int)len - 1);
			/* Corrupted index file, error is set from
			 * index_get_parents */
			if (r < 0)
				goto bail;
			if (((revstates[parents[0] + 1] |
			      revstates[parents[1] + 1]) &
			     RS_REACHABLE) &&
			    !(revstates[i + 1] & RS_REACHABLE)) {
				revstates[i + 1] |= RS_REACHABLE;
				val = PyInt_FromSsize_t(i);
				if (val == NULL)
					goto bail;
				r = PyList_Append(reachable, val);
				Py_DECREF(val);
				if (r < 0)
					goto bail;
			}
		}
	}

	free(revstates);
	free(tovisit);
	return reachable;
bail:
	Py_XDECREF(reachable);
	free(revstates);
	free(tovisit);
	return NULL;
}
772 773
/*
 * compute_phases_map_sets(roots) -> (len, [None, set, set, ...])
 *
 * 'roots' is a list of lists: one list of phase-root revisions per
 * non-public phase. The phase of every rev is computed by marking the
 * roots and then propagating phases from parents to children. The
 * result pairs the index length with a list of per-phase rev sets,
 * where slot 0 is None (public revs are the complement of the others).
 * Returns NULL with an exception set on error.
 */
static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
{
	PyObject *roots = Py_None;
	PyObject *ret = NULL;
	PyObject *phasessize = NULL;
	PyObject *phaseroots = NULL;
	PyObject *phaseset = NULL;
	PyObject *phasessetlist = NULL;
	PyObject *rev = NULL;
	Py_ssize_t len = index_length(self);
	Py_ssize_t numphase = 0;
	Py_ssize_t minrevallphases = 0;
	Py_ssize_t minrevphase = 0;
	Py_ssize_t i = 0;
	char *phases = NULL;
	long phase;

	if (!PyArg_ParseTuple(args, "O", &roots))
		goto done;
	if (roots == NULL || !PyList_Check(roots)) {
		PyErr_SetString(PyExc_TypeError, "roots must be a list");
		goto done;
	}

	phases = calloc(
	    len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
	if (phases == NULL) {
		PyErr_NoMemory();
		goto done;
	}
	/* Put the phase information of all the roots in phases */
	numphase = PyList_GET_SIZE(roots) + 1;
	minrevallphases = len + 1;
	phasessetlist = PyList_New(numphase);
	if (phasessetlist == NULL)
		goto done;

	/* Slot 0 stands for the public phase, which has no explicit set. */
	PyList_SET_ITEM(phasessetlist, 0, Py_None);
	Py_INCREF(Py_None);

	for (i = 0; i < numphase - 1; i++) {
		phaseroots = PyList_GET_ITEM(roots, i);
		phaseset = PySet_New(NULL);
		if (phaseset == NULL)
			goto release;
		/* phasessetlist steals the reference to phaseset */
		PyList_SET_ITEM(phasessetlist, i + 1, phaseset);
		if (!PyList_Check(phaseroots)) {
			PyErr_SetString(PyExc_TypeError,
			                "roots item must be a list");
			goto release;
		}
		minrevphase =
		    add_roots_get_min(self, phaseroots, i + 1, phases);
		if (minrevphase == -2) /* Error from add_roots_get_min */
			goto release;
		minrevallphases = MIN(minrevallphases, minrevphase);
	}
	/* Propagate the phase information from the roots to the revs */
	if (minrevallphases != -1) {
		int parents[2];
		for (i = minrevallphases; i < len; i++) {
			if (index_get_parents(self, i, parents, (int)len - 1) <
			    0)
				goto release;
			set_phase_from_parents(phases, parents[0], parents[1],
			                       i);
		}
	}
	/* Transform phase list to a python list */
	phasessize = PyInt_FromSsize_t(len);
	if (phasessize == NULL)
		goto release;
	for (i = 0; i < len; i++) {
		phase = phases[i];
		/* We only store the sets of phase for non public phase, the
		 * public phase is computed as a difference */
		if (phase != 0) {
			/* phaseset is borrowed from phasessetlist here */
			phaseset = PyList_GET_ITEM(phasessetlist, phase);
			rev = PyInt_FromSsize_t(i);
			if (rev == NULL)
				goto release;
			PySet_Add(phaseset, rev);
			Py_XDECREF(rev);
		}
	}
	ret = PyTuple_Pack(2, phasessize, phasessetlist);

release:
	Py_XDECREF(phasessize);
	Py_XDECREF(phasessetlist);
done:
	free(phases);
	return ret;
}
867 868
/*
 * index_headrevs([filteredrevs]) -> list of head revisions
 *
 * A head is a rev that has no unfiltered child. The result is cached on
 * self->headrevs (invalidated when the index or the filter changes) and
 * callers always receive a fresh copy so the cache cannot be mutated.
 * An empty index yields [-1]. Returns NULL with an exception on error.
 */
static PyObject *index_headrevs(indexObject *self, PyObject *args)
{
	Py_ssize_t i, j, len;
	char *nothead = NULL;
	PyObject *heads = NULL;
	PyObject *filter = NULL;
	PyObject *filteredrevs = Py_None;

	if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
		return NULL;
	}

	/* Cache hit requires the very same filter object (identity). */
	if (self->headrevs && filteredrevs == self->filteredrevs)
		return list_copy(self->headrevs);

	Py_DECREF(self->filteredrevs);
	self->filteredrevs = filteredrevs;
	Py_INCREF(filteredrevs);

	if (filteredrevs != Py_None) {
		filter = PyObject_GetAttrString(filteredrevs, "__contains__");
		if (!filter) {
			PyErr_SetString(
			    PyExc_TypeError,
			    "filteredrevs has no attribute __contains__");
			goto bail;
		}
	}

	len = index_length(self);
	heads = PyList_New(0);
	if (heads == NULL)
		goto bail;
	if (len == 0) {
		/* Empty repository: the null revision is the only head. */
		PyObject *nullid = PyInt_FromLong(-1);
		if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
			Py_XDECREF(nullid);
			goto bail;
		}
		goto done;
	}

	nothead = calloc(len, 1);
	if (nothead == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* Walk from tip to root, marking parents of unfiltered revs. */
	for (i = len - 1; i >= 0; i--) {
		int isfiltered;
		int parents[2];

		/* If nothead[i] == 1, it means we've seen an unfiltered child
		 * of this node already, and therefore this node is not
		 * filtered. So we can skip the expensive check_filter step.
		 */
		if (nothead[i] != 1) {
			isfiltered = check_filter(filter, i);
			if (isfiltered == -1) {
				PyErr_SetString(PyExc_TypeError,
				                "unable to check filter");
				goto bail;
			}

			if (isfiltered) {
				nothead[i] = 1;
				continue;
			}
		}

		if (index_get_parents(self, i, parents, (int)len - 1) < 0)
			goto bail;
		for (j = 0; j < 2; j++) {
			if (parents[j] >= 0)
				nothead[parents[j]] = 1;
		}
	}

	/* Every rev not marked above is a head. */
	for (i = 0; i < len; i++) {
		PyObject *head;

		if (nothead[i])
			continue;
		head = PyInt_FromSsize_t(i);
		if (head == NULL || PyList_Append(heads, head) == -1) {
			Py_XDECREF(head);
			goto bail;
		}
	}

done:
	/* The list reference is transferred to the cache; list_copy makes
	 * the caller's independent copy. */
	self->headrevs = heads;
	Py_XDECREF(filter);
	free(nothead);
	return list_copy(self->headrevs);
bail:
	Py_XDECREF(filter);
	Py_XDECREF(heads);
	free(nothead);
	return NULL;
}
969 970
970 971 /**
971 972 * Obtain the base revision index entry.
972 973 *
973 974 * Callers must ensure that rev >= 0 or illegal memory access may occur.
974 975 */
975 976 static inline int index_baserev(indexObject *self, int rev)
976 977 {
977 978 const char *data;
978 979 int result;
979 980
980 981 if (rev >= self->length) {
981 982 PyObject *tuple =
982 983 PyList_GET_ITEM(self->added, rev - self->length);
983 984 long ret;
984 985 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 3), &ret)) {
985 986 return -2;
986 987 }
987 988 result = (int)ret;
988 989 } else {
989 990 data = index_deref(self, rev);
990 991 if (data == NULL) {
991 992 return -2;
992 993 }
993 994
994 995 result = getbe32(data + 16);
995 996 }
996 997 if (result > rev) {
997 998 PyErr_Format(
998 999 PyExc_ValueError,
999 1000 "corrupted revlog, revision base above revision: %d, %d",
1000 1001 rev, result);
1001 1002 return -2;
1002 1003 }
1003 1004 if (result < -1) {
1004 1005 PyErr_Format(
1005 1006 PyExc_ValueError,
1006 1007 "corrupted revlog, revision base out of range: %d, %d", rev,
1007 1008 result);
1008 1009 return -2;
1009 1010 }
1010 1011 return result;
1011 1012 }
1012 1013
/**
 * Find if a revision is a snapshot or not
 *
 * Only relevant for sparse-revlog case.
 * Callers must ensure that rev is in a valid range.
 * Returns 1 (snapshot), 0 (not a snapshot), or -1 on error with an
 * exception set.
 */
static int index_issnapshotrev(indexObject *self, Py_ssize_t rev)
{
	int ps[2];
	Py_ssize_t base;
	/* Follow the delta chain: a rev is a snapshot when the chain
	 * reaches a full-text base without ever deltaing against one of
	 * the current rev's own parents. */
	while (rev >= 0) {
		base = (Py_ssize_t)index_baserev(self, rev);
		if (base == rev) {
			/* self-based entry means a full-text revision */
			base = -1;
		}
		if (base == -2) {
			assert(PyErr_Occurred());
			return -1;
		}
		if (base == -1) {
			/* reached a full text: the chain is snapshot-only */
			return 1;
		}
		if (index_get_parents(self, rev, ps, (int)rev) < 0) {
			assert(PyErr_Occurred());
			return -1;
		};
		if (base == ps[0] || base == ps[1]) {
			/* delta against a parent: a regular delta, not a
			 * snapshot */
			return 0;
		}
		rev = base;
	}
	return rev == -1;
}
1046 1047
1047 1048 static PyObject *index_issnapshot(indexObject *self, PyObject *value)
1048 1049 {
1049 1050 long rev;
1050 1051 int issnap;
1051 1052 Py_ssize_t length = index_length(self);
1052 1053
1053 1054 if (!pylong_to_long(value, &rev)) {
1054 1055 return NULL;
1055 1056 }
1056 1057 if (rev < -1 || rev >= length) {
1057 1058 PyErr_Format(PyExc_ValueError, "revlog index out of range: %ld",
1058 1059 rev);
1059 1060 return NULL;
1060 1061 };
1061 1062 issnap = index_issnapshotrev(self, (Py_ssize_t)rev);
1062 1063 if (issnap < 0) {
1063 1064 return NULL;
1064 1065 };
1065 1066 return PyBool_FromLong((long)issnap);
1066 1067 }
1067 1068
1068 1069 static PyObject *index_findsnapshots(indexObject *self, PyObject *args)
1069 1070 {
1070 1071 Py_ssize_t start_rev;
1071 1072 PyObject *cache;
1072 1073 Py_ssize_t base;
1073 1074 Py_ssize_t rev;
1074 1075 PyObject *key = NULL;
1075 1076 PyObject *value = NULL;
1076 1077 const Py_ssize_t length = index_length(self);
1077 1078 if (!PyArg_ParseTuple(args, "O!n", &PyDict_Type, &cache, &start_rev)) {
1078 1079 return NULL;
1079 1080 }
1080 1081 for (rev = start_rev; rev < length; rev++) {
1081 1082 int issnap;
1082 1083 PyObject *allvalues = NULL;
1083 1084 issnap = index_issnapshotrev(self, rev);
1084 1085 if (issnap < 0) {
1085 1086 goto bail;
1086 1087 }
1087 1088 if (issnap == 0) {
1088 1089 continue;
1089 1090 }
1090 1091 base = (Py_ssize_t)index_baserev(self, rev);
1091 1092 if (base == rev) {
1092 1093 base = -1;
1093 1094 }
1094 1095 if (base == -2) {
1095 1096 assert(PyErr_Occurred());
1096 1097 goto bail;
1097 1098 }
1098 1099 key = PyInt_FromSsize_t(base);
1099 1100 allvalues = PyDict_GetItem(cache, key);
1100 1101 if (allvalues == NULL && PyErr_Occurred()) {
1101 1102 goto bail;
1102 1103 }
1103 1104 if (allvalues == NULL) {
1104 1105 int r;
1105 1106 allvalues = PyList_New(0);
1106 1107 if (!allvalues) {
1107 1108 goto bail;
1108 1109 }
1109 1110 r = PyDict_SetItem(cache, key, allvalues);
1110 1111 Py_DECREF(allvalues);
1111 1112 if (r < 0) {
1112 1113 goto bail;
1113 1114 }
1114 1115 }
1115 1116 value = PyInt_FromSsize_t(rev);
1116 1117 if (PyList_Append(allvalues, value)) {
1117 1118 goto bail;
1118 1119 }
1119 1120 Py_CLEAR(key);
1120 1121 Py_CLEAR(value);
1121 1122 }
1122 1123 Py_RETURN_NONE;
1123 1124 bail:
1124 1125 Py_XDECREF(key);
1125 1126 Py_XDECREF(value);
1126 1127 return NULL;
1127 1128 }
1128 1129
1129 1130 static PyObject *index_deltachain(indexObject *self, PyObject *args)
1130 1131 {
1131 1132 int rev, generaldelta;
1132 1133 PyObject *stoparg;
1133 1134 int stoprev, iterrev, baserev = -1;
1134 1135 int stopped;
1135 1136 PyObject *chain = NULL, *result = NULL;
1136 1137 const Py_ssize_t length = index_length(self);
1137 1138
1138 1139 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
1139 1140 return NULL;
1140 1141 }
1141 1142
1142 1143 if (PyInt_Check(stoparg)) {
1143 1144 stoprev = (int)PyInt_AsLong(stoparg);
1144 1145 if (stoprev == -1 && PyErr_Occurred()) {
1145 1146 return NULL;
1146 1147 }
1147 1148 } else if (stoparg == Py_None) {
1148 1149 stoprev = -2;
1149 1150 } else {
1150 1151 PyErr_SetString(PyExc_ValueError,
1151 1152 "stoprev must be integer or None");
1152 1153 return NULL;
1153 1154 }
1154 1155
1155 1156 if (rev < 0 || rev >= length) {
1156 1157 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1157 1158 return NULL;
1158 1159 }
1159 1160
1160 1161 chain = PyList_New(0);
1161 1162 if (chain == NULL) {
1162 1163 return NULL;
1163 1164 }
1164 1165
1165 1166 baserev = index_baserev(self, rev);
1166 1167
1167 1168 /* This should never happen. */
1168 1169 if (baserev <= -2) {
1169 1170 /* Error should be set by index_deref() */
1170 1171 assert(PyErr_Occurred());
1171 1172 goto bail;
1172 1173 }
1173 1174
1174 1175 iterrev = rev;
1175 1176
1176 1177 while (iterrev != baserev && iterrev != stoprev) {
1177 1178 PyObject *value = PyInt_FromLong(iterrev);
1178 1179 if (value == NULL) {
1179 1180 goto bail;
1180 1181 }
1181 1182 if (PyList_Append(chain, value)) {
1182 1183 Py_DECREF(value);
1183 1184 goto bail;
1184 1185 }
1185 1186 Py_DECREF(value);
1186 1187
1187 1188 if (generaldelta) {
1188 1189 iterrev = baserev;
1189 1190 } else {
1190 1191 iterrev--;
1191 1192 }
1192 1193
1193 1194 if (iterrev < 0) {
1194 1195 break;
1195 1196 }
1196 1197
1197 1198 if (iterrev >= length) {
1198 1199 PyErr_SetString(PyExc_IndexError,
1199 1200 "revision outside index");
1200 1201 return NULL;
1201 1202 }
1202 1203
1203 1204 baserev = index_baserev(self, iterrev);
1204 1205
1205 1206 /* This should never happen. */
1206 1207 if (baserev <= -2) {
1207 1208 /* Error should be set by index_deref() */
1208 1209 assert(PyErr_Occurred());
1209 1210 goto bail;
1210 1211 }
1211 1212 }
1212 1213
1213 1214 if (iterrev == stoprev) {
1214 1215 stopped = 1;
1215 1216 } else {
1216 1217 PyObject *value = PyInt_FromLong(iterrev);
1217 1218 if (value == NULL) {
1218 1219 goto bail;
1219 1220 }
1220 1221 if (PyList_Append(chain, value)) {
1221 1222 Py_DECREF(value);
1222 1223 goto bail;
1223 1224 }
1224 1225 Py_DECREF(value);
1225 1226
1226 1227 stopped = 0;
1227 1228 }
1228 1229
1229 1230 if (PyList_Reverse(chain)) {
1230 1231 goto bail;
1231 1232 }
1232 1233
1233 1234 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
1234 1235 Py_DECREF(chain);
1235 1236 return result;
1236 1237
1237 1238 bail:
1238 1239 Py_DECREF(chain);
1239 1240 return NULL;
1240 1241 }
1241 1242
1242 1243 static inline int64_t
1243 1244 index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
1244 1245 {
1245 1246 int64_t start_offset;
1246 1247 int64_t end_offset;
1247 1248 int end_size;
1248 1249 start_offset = index_get_start(self, start_rev);
1249 1250 if (start_offset < 0) {
1250 1251 return -1;
1251 1252 }
1252 1253 end_offset = index_get_start(self, end_rev);
1253 1254 if (end_offset < 0) {
1254 1255 return -1;
1255 1256 }
1256 1257 end_size = index_get_length(self, end_rev);
1257 1258 if (end_size < 0) {
1258 1259 return -1;
1259 1260 }
1260 1261 if (end_offset < start_offset) {
1261 1262 PyErr_Format(PyExc_ValueError,
1262 1263 "corrupted revlog index: inconsistent offset "
1263 1264 "between revisions (%zd) and (%zd)",
1264 1265 start_rev, end_rev);
1265 1266 return -1;
1266 1267 }
1267 1268 return (end_offset - start_offset) + (int64_t)end_size;
1268 1269 }
1269 1270
1270 1271 /* returns endidx so that revs[startidx:endidx] has no empty trailing revs */
1271 1272 static Py_ssize_t trim_endidx(indexObject *self, const Py_ssize_t *revs,
1272 1273 Py_ssize_t startidx, Py_ssize_t endidx)
1273 1274 {
1274 1275 int length;
1275 1276 while (endidx > 1 && endidx > startidx) {
1276 1277 length = index_get_length(self, revs[endidx - 1]);
1277 1278 if (length < 0) {
1278 1279 return -1;
1279 1280 }
1280 1281 if (length != 0) {
1281 1282 break;
1282 1283 }
1283 1284 endidx -= 1;
1284 1285 }
1285 1286 return endidx;
1286 1287 }
1287 1288
/* A stretch of unused bytes between two revisions of a delta chain,
 * recorded while slicing a chain to density. */
struct Gap {
	int64_t size;   /* number of unread bytes in the gap */
	Py_ssize_t idx; /* index, in the revs array, of the rev after the gap */
};
1292 1293
1293 1294 static int gap_compare(const void *left, const void *right)
1294 1295 {
1295 1296 const struct Gap *l_left = ((const struct Gap *)left);
1296 1297 const struct Gap *l_right = ((const struct Gap *)right);
1297 1298 if (l_left->size < l_right->size) {
1298 1299 return -1;
1299 1300 } else if (l_left->size > l_right->size) {
1300 1301 return 1;
1301 1302 }
1302 1303 return 0;
1303 1304 }
1304 1305 static int Py_ssize_t_compare(const void *left, const void *right)
1305 1306 {
1306 1307 const Py_ssize_t l_left = *(const Py_ssize_t *)left;
1307 1308 const Py_ssize_t l_right = *(const Py_ssize_t *)right;
1308 1309 if (l_left < l_right) {
1309 1310 return -1;
1310 1311 } else if (l_left > l_right) {
1311 1312 return 1;
1312 1313 }
1313 1314 return 0;
1314 1315 }
1315 1316
/*
 * slicechunktodensity(revs, targetdensity, mingapsize)
 *
 * Slice the delta chain `revs` (a list of revision numbers) into
 * chunks dense enough to be read efficiently: each returned chunk has
 * a payload/read-bytes ratio of at least `targetdensity`, ignoring
 * gaps smaller than `mingapsize` bytes. Returns a sequence of lists
 * of revisions (the unmodified input when no slicing is needed), or
 * NULL with an exception set.
 */
static PyObject *index_slicechunktodensity(indexObject *self, PyObject *args)
{
	/* method arguments */
	PyObject *list_revs = NULL; /* revisions in the chain */
	double targetdensity = 0;   /* min density to achieve */
	Py_ssize_t mingapsize = 0;  /* threshold to ignore gaps */

	/* other core variables */
	Py_ssize_t idxlen = index_length(self);
	Py_ssize_t i;            /* used for various iteration */
	PyObject *result = NULL; /* the final return of the function */

	/* generic information about the delta chain being slice */
	Py_ssize_t num_revs = 0;    /* size of the full delta chain */
	Py_ssize_t *revs = NULL;    /* native array of revision in the chain */
	int64_t chainpayload = 0;   /* sum of all delta in the chain */
	int64_t deltachainspan = 0; /* distance from first byte to last byte */

	/* variable used for slicing the delta chain */
	int64_t readdata = 0; /* amount of data currently planned to be read */
	double density = 0;   /* ration of payload data compared to read ones */
	int64_t previous_end;
	struct Gap *gaps = NULL; /* array of notable gap in the chain */
	Py_ssize_t num_gaps =
	    0; /* total number of notable gap recorded so far */
	Py_ssize_t *selected_indices = NULL; /* indices of gap skipped over */
	Py_ssize_t num_selected = 0;         /* number of gaps skipped */
	PyObject *chunk = NULL;              /* individual slice */
	PyObject *allchunks = NULL;          /* all slices */
	Py_ssize_t previdx;

	/* parsing argument */
	if (!PyArg_ParseTuple(args, "O!dn", &PyList_Type, &list_revs,
	                      &targetdensity, &mingapsize)) {
		goto bail;
	}

	/* If the delta chain contains a single element, we do not need slicing
	 */
	num_revs = PyList_GET_SIZE(list_revs);
	if (num_revs <= 1) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* Turn the python list into a native integer array (for efficiency) */
	revs = (Py_ssize_t *)calloc(num_revs, sizeof(Py_ssize_t));
	if (revs == NULL) {
		PyErr_NoMemory();
		goto bail;
	}
	for (i = 0; i < num_revs; i++) {
		Py_ssize_t revnum = PyInt_AsLong(PyList_GET_ITEM(list_revs, i));
		if (revnum == -1 && PyErr_Occurred()) {
			goto bail;
		}
		if (revnum < nullrev || revnum >= idxlen) {
			PyErr_Format(PyExc_IndexError,
			             "index out of range: %zd", revnum);
			goto bail;
		}
		revs[i] = revnum;
	}

	/* Compute and check various property of the unsliced delta chain */
	deltachainspan = index_segment_span(self, revs[0], revs[num_revs - 1]);
	if (deltachainspan < 0) {
		goto bail;
	}

	/* a span below the gap threshold can never need slicing */
	if (deltachainspan <= mingapsize) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	chainpayload = 0;
	for (i = 0; i < num_revs; i++) {
		int tmp = index_get_length(self, revs[i]);
		if (tmp < 0) {
			goto bail;
		}
		chainpayload += tmp;
	}

	readdata = deltachainspan;
	density = 1.0;

	if (0 < deltachainspan) {
		density = (double)chainpayload / (double)deltachainspan;
	}

	/* chain already dense enough: return it whole */
	if (density >= targetdensity) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* if chain is too sparse, look for relevant gaps */
	gaps = (struct Gap *)calloc(num_revs, sizeof(struct Gap));
	if (gaps == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	previous_end = -1;
	for (i = 0; i < num_revs; i++) {
		int64_t revstart;
		int revsize;
		revstart = index_get_start(self, revs[i]);
		if (revstart < 0) {
			goto bail;
		};
		revsize = index_get_length(self, revs[i]);
		if (revsize < 0) {
			goto bail;
		};
		/* empty revisions do not contribute data or gaps */
		if (revsize == 0) {
			continue;
		}
		if (previous_end >= 0) {
			int64_t gapsize = revstart - previous_end;
			if (gapsize > mingapsize) {
				gaps[num_gaps].size = gapsize;
				gaps[num_gaps].idx = i;
				num_gaps += 1;
			}
		}
		previous_end = revstart + revsize;
	}
	if (num_gaps == 0) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	qsort(gaps, num_gaps, sizeof(struct Gap), &gap_compare);

	/* Slice the largest gap first, they improve the density the most */
	selected_indices =
	    (Py_ssize_t *)malloc((num_gaps + 1) * sizeof(Py_ssize_t));
	if (selected_indices == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	for (i = num_gaps - 1; i >= 0; i--) {
		selected_indices[num_selected] = gaps[i].idx;
		readdata -= gaps[i].size;
		num_selected += 1;
		if (readdata <= 0) {
			density = 1.0;
		} else {
			density = (double)chainpayload / (double)readdata;
		}
		if (density >= targetdensity) {
			break;
		}
	}
	/* cut points must be applied in increasing rev order */
	qsort(selected_indices, num_selected, sizeof(Py_ssize_t),
	      &Py_ssize_t_compare);

	/* create the resulting slice */
	allchunks = PyList_New(0);
	if (allchunks == NULL) {
		goto bail;
	}
	previdx = 0;
	/* sentinel: treat the end of the chain as a final cut point */
	selected_indices[num_selected] = num_revs;
	for (i = 0; i <= num_selected; i++) {
		Py_ssize_t idx = selected_indices[i];
		Py_ssize_t endidx = trim_endidx(self, revs, previdx, idx);
		if (endidx < 0) {
			goto bail;
		}
		if (previdx < endidx) {
			chunk = PyList_GetSlice(list_revs, previdx, endidx);
			if (chunk == NULL) {
				goto bail;
			}
			if (PyList_Append(allchunks, chunk) == -1) {
				goto bail;
			}
			Py_DECREF(chunk);
			chunk = NULL;
		}
		previdx = idx;
	}
	result = allchunks;
	goto done;

bail:
	Py_XDECREF(allchunks);
	Py_XDECREF(chunk);
done:
	free(revs);
	free(gaps);
	free(selected_indices);
	return result;
}
1511 1512
1512 1513 static inline int nt_level(const char *node, Py_ssize_t level)
1513 1514 {
1514 1515 int v = node[level >> 1];
1515 1516 if (!(level & 1))
1516 1517 v >>= 4;
1517 1518 return v & 0xf;
1518 1519 }
1519 1520
1520 1521 /*
1521 1522 * Return values:
1522 1523 *
1523 1524 * -4: match is ambiguous (multiple candidates)
1524 1525 * -2: not found
1525 1526 * rest: valid rev
1526 1527 */
1527 1528 static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
1528 1529 int hex)
1529 1530 {
1530 1531 int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
1531 1532 int level, maxlevel, off;
1532 1533
1533 1534 if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
1534 1535 return -1;
1535 1536
1536 1537 if (hex)
1537 1538 maxlevel = nodelen > 40 ? 40 : (int)nodelen;
1538 1539 else
1539 1540 maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);
1540 1541
1541 1542 for (level = off = 0; level < maxlevel; level++) {
1542 1543 int k = getnybble(node, level);
1543 1544 nodetreenode *n = &self->nodes[off];
1544 1545 int v = n->children[k];
1545 1546
1546 1547 if (v < 0) {
1547 1548 const char *n;
1548 1549 Py_ssize_t i;
1549 1550
1550 1551 v = -(v + 2);
1551 1552 n = index_node(self->index, v);
1552 1553 if (n == NULL)
1553 1554 return -2;
1554 1555 for (i = level; i < maxlevel; i++)
1555 1556 if (getnybble(node, i) != nt_level(n, i))
1556 1557 return -2;
1557 1558 return v;
1558 1559 }
1559 1560 if (v == 0)
1560 1561 return -2;
1561 1562 off = v;
1562 1563 }
1563 1564 /* multiple matches against an ambiguous prefix */
1564 1565 return -4;
1565 1566 }
1566 1567
1567 1568 static int nt_new(nodetree *self)
1568 1569 {
1569 1570 if (self->length == self->capacity) {
1570 1571 unsigned newcapacity;
1571 1572 nodetreenode *newnodes;
1572 1573 newcapacity = self->capacity * 2;
1573 1574 if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
1574 1575 PyErr_SetString(PyExc_MemoryError,
1575 1576 "overflow in nt_new");
1576 1577 return -1;
1577 1578 }
1578 1579 newnodes =
1579 1580 realloc(self->nodes, newcapacity * sizeof(nodetreenode));
1580 1581 if (newnodes == NULL) {
1581 1582 PyErr_SetString(PyExc_MemoryError, "out of memory");
1582 1583 return -1;
1583 1584 }
1584 1585 self->capacity = newcapacity;
1585 1586 self->nodes = newnodes;
1586 1587 memset(&self->nodes[self->length], 0,
1587 1588 sizeof(nodetreenode) * (self->capacity - self->length));
1588 1589 }
1589 1590 return self->length++;
1590 1591 }
1591 1592
/*
 * Insert the 20-byte binary `node` into the trie, mapping it to
 * `rev`. Returns 0 on success, -1 on error (exception set).
 */
static int nt_insert(nodetree *self, const char *node, int rev)
{
	int level = 0;
	int off = 0;

	while (level < 40) {
		int k = nt_level(node, level);
		nodetreenode *n;
		int v;

		n = &self->nodes[off];
		v = n->children[k];

		if (v == 0) {
			/* empty slot: store the rev as a leaf (-rev - 2) */
			n->children[k] = -rev - 2;
			return 0;
		}
		if (v < 0) {
			/* slot already holds a leaf: split it */
			const char *oldnode =
			    index_node_existing(self->index, -(v + 2));
			int noff;

			if (oldnode == NULL)
				return -1;
			if (!memcmp(oldnode, node, 20)) {
				/* same node: just update the rev */
				n->children[k] = -rev - 2;
				return 0;
			}
			noff = nt_new(self);
			if (noff == -1)
				return -1;
			/* self->nodes may have been changed by realloc */
			self->nodes[off].children[k] = noff;
			off = noff;
			n = &self->nodes[off];
			/* push the old leaf one level down, then loop to
			 * place the new node */
			n->children[nt_level(oldnode, ++level)] = v;
			if (level > self->depth)
				self->depth = level;
			self->splits += 1;
		} else {
			/* interior node: descend */
			level += 1;
			off = v;
		}
	}

	return -1;
}
1639 1640
1640 1641 static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
1641 1642 {
1642 1643 Py_ssize_t rev;
1643 1644 const char *node;
1644 1645 Py_ssize_t length;
1645 1646 if (!PyArg_ParseTuple(args, "n", &rev))
1646 1647 return NULL;
1647 1648 length = index_length(self->nt.index);
1648 1649 if (rev < 0 || rev >= length) {
1649 1650 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1650 1651 return NULL;
1651 1652 }
1652 1653 node = index_node_existing(self->nt.index, rev);
1653 1654 if (nt_insert(&self->nt, node, (int)rev) == -1)
1654 1655 return NULL;
1655 1656 Py_RETURN_NONE;
1656 1657 }
1657 1658
/* Remove `node` from the trie. Returns 0 on success, -1 on error
 * (exception set), mirroring nt_insert. */
static int nt_delete_node(nodetree *self, const char *node)
{
	/* rev==-2 happens to get encoded as 0, which is interpreted as not set
	 */
	return nt_insert(self, node, -2);
}
1664 1665
1665 1666 static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
1666 1667 {
1667 1668 /* Initialize before overflow-checking to avoid nt_dealloc() crash. */
1668 1669 self->nodes = NULL;
1669 1670
1670 1671 self->index = index;
1671 1672 /* The input capacity is in terms of revisions, while the field is in
1672 1673 * terms of nodetree nodes. */
1673 1674 self->capacity = (capacity < 4 ? 4 : capacity / 2);
1674 1675 self->depth = 0;
1675 1676 self->splits = 0;
1676 1677 if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
1677 1678 PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
1678 1679 return -1;
1679 1680 }
1680 1681 self->nodes = calloc(self->capacity, sizeof(nodetreenode));
1681 1682 if (self->nodes == NULL) {
1682 1683 PyErr_NoMemory();
1683 1684 return -1;
1684 1685 }
1685 1686 self->length = 1;
1686 1687 return 0;
1687 1688 }
1688 1689
1689 1690 static int ntobj_init(nodetreeObject *self, PyObject *args)
1690 1691 {
1691 1692 PyObject *index;
1692 1693 unsigned capacity;
1693 1694 if (!PyArg_ParseTuple(args, "O!I", &HgRevlogIndex_Type, &index,
1694 1695 &capacity))
1695 1696 return -1;
1696 1697 Py_INCREF(index);
1697 1698 return nt_init(&self->nt, (indexObject *)index, capacity);
1698 1699 }
1699 1700
/* Resolve the hex prefix `node` (of `nodelen` digits) to a rev.
 * See nt_find for the return values. */
static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
{
	return nt_find(self, node, nodelen, 1);
}
1704 1705
1705 1706 /*
1706 1707 * Find the length of the shortest unique prefix of node.
1707 1708 *
1708 1709 * Return values:
1709 1710 *
1710 1711 * -3: error (exception set)
1711 1712 * -2: not found (no exception set)
1712 1713 * rest: length of shortest prefix
1713 1714 */
1714 1715 static int nt_shortest(nodetree *self, const char *node)
1715 1716 {
1716 1717 int level, off;
1717 1718
1718 1719 for (level = off = 0; level < 40; level++) {
1719 1720 int k, v;
1720 1721 nodetreenode *n = &self->nodes[off];
1721 1722 k = nt_level(node, level);
1722 1723 v = n->children[k];
1723 1724 if (v < 0) {
1724 1725 const char *n;
1725 1726 v = -(v + 2);
1726 1727 n = index_node_existing(self->index, v);
1727 1728 if (n == NULL)
1728 1729 return -3;
1729 1730 if (memcmp(node, n, 20) != 0)
1730 1731 /*
1731 1732 * Found a unique prefix, but it wasn't for the
1732 1733 * requested node (i.e the requested node does
1733 1734 * not exist).
1734 1735 */
1735 1736 return -2;
1736 1737 return level + 1;
1737 1738 }
1738 1739 if (v == 0)
1739 1740 return -2;
1740 1741 off = v;
1741 1742 }
1742 1743 /*
1743 1744 * The node was still not unique after 40 hex digits, so this won't
1744 1745 * happen. Also, if we get here, then there's a programming error in
1745 1746 * this file that made us insert a node longer than 40 hex digits.
1746 1747 */
1747 1748 PyErr_SetString(PyExc_Exception, "broken node tree");
1748 1749 return -3;
1749 1750 }
1750 1751
1751 1752 static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
1752 1753 {
1753 1754 PyObject *val;
1754 1755 char *node;
1755 1756 int length;
1756 1757
1757 1758 if (!PyArg_ParseTuple(args, "O", &val))
1758 1759 return NULL;
1759 1760 if (node_check(val, &node) == -1)
1760 1761 return NULL;
1761 1762
1762 1763 length = nt_shortest(&self->nt, node);
1763 1764 if (length == -3)
1764 1765 return NULL;
1765 1766 if (length == -2) {
1766 1767 raise_revlog_error();
1767 1768 return NULL;
1768 1769 }
1769 1770 return PyInt_FromLong(length);
1770 1771 }
1771 1772
/* Release the trie's node storage; safe to call more than once. */
static void nt_dealloc(nodetree *self)
{
	free(self->nodes);
	self->nodes = NULL;
}
1777 1778
/* Destructor for the Python nodetree object: drop the reference on
 * the backing index, free the trie storage, then the object itself. */
static void ntobj_dealloc(nodetreeObject *self)
{
	Py_XDECREF(self->nt.index);
	nt_dealloc(&self->nt);
	PyObject_Del(self);
}
1784 1785
/* Methods exposed on the Python-level nodetree object. */
static PyMethodDef ntobj_methods[] = {
    {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
     "insert an index entry"},
    {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
     "find length of shortest hex nodeid of a binary ID"},
    {NULL} /* Sentinel */
};
1792 1793
/* Python type object for parsers.nodetree. */
static PyTypeObject nodetreeType = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.nodetree",            /* tp_name */
    sizeof(nodetreeObject),        /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)ntobj_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    0,                             /* tp_as_sequence */
    0,                             /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "nodetree",                    /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    ntobj_methods,                 /* tp_methods */
    0,                             /* tp_members */
    0,                             /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)ntobj_init,          /* tp_init */
    0,                             /* tp_alloc */
};
1832 1833
1833 1834 static int index_init_nt(indexObject *self)
1834 1835 {
1835 1836 if (!self->ntinitialized) {
1836 1837 if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
1837 1838 nt_dealloc(&self->nt);
1838 1839 return -1;
1839 1840 }
1840 1841 if (nt_insert(&self->nt, nullid, -1) == -1) {
1841 1842 nt_dealloc(&self->nt);
1842 1843 return -1;
1843 1844 }
1844 1845 self->ntinitialized = 1;
1845 1846 self->ntrev = (int)index_length(self);
1846 1847 self->ntlookups = 1;
1847 1848 self->ntmisses = 0;
1848 1849 }
1849 1850 return 0;
1850 1851 }
1851 1852
1852 1853 /*
1853 1854 * Return values:
1854 1855 *
1855 1856 * -3: error (exception set)
1856 1857 * -2: not found (no exception set)
1857 1858 * rest: valid rev
1858 1859 */
1859 1860 static int index_find_node(indexObject *self, const char *node,
1860 1861 Py_ssize_t nodelen)
1861 1862 {
1862 1863 int rev;
1863 1864
1864 1865 if (index_init_nt(self) == -1)
1865 1866 return -3;
1866 1867
1867 1868 self->ntlookups++;
1868 1869 rev = nt_find(&self->nt, node, nodelen, 0);
1869 1870 if (rev >= -1)
1870 1871 return rev;
1871 1872
1872 1873 /*
1873 1874 * For the first handful of lookups, we scan the entire index,
1874 1875 * and cache only the matching nodes. This optimizes for cases
1875 1876 * like "hg tip", where only a few nodes are accessed.
1876 1877 *
1877 1878 * After that, we cache every node we visit, using a single
1878 1879 * scan amortized over multiple lookups. This gives the best
1879 1880 * bulk performance, e.g. for "hg log".
1880 1881 */
1881 1882 if (self->ntmisses++ < 4) {
1882 1883 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1883 1884 const char *n = index_node_existing(self, rev);
1884 1885 if (n == NULL)
1885 1886 return -3;
1886 1887 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1887 1888 if (nt_insert(&self->nt, n, rev) == -1)
1888 1889 return -3;
1889 1890 break;
1890 1891 }
1891 1892 }
1892 1893 } else {
1893 1894 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1894 1895 const char *n = index_node_existing(self, rev);
1895 1896 if (n == NULL)
1896 1897 return -3;
1897 1898 if (nt_insert(&self->nt, n, rev) == -1) {
1898 1899 self->ntrev = rev + 1;
1899 1900 return -3;
1900 1901 }
1901 1902 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1902 1903 break;
1903 1904 }
1904 1905 }
1905 1906 self->ntrev = rev;
1906 1907 }
1907 1908
1908 1909 if (rev >= 0)
1909 1910 return rev;
1910 1911 return -2;
1911 1912 }
1912 1913
1913 1914 static PyObject *index_getitem(indexObject *self, PyObject *value)
1914 1915 {
1915 1916 char *node;
1916 1917 int rev;
1917 1918
1918 1919 if (PyInt_Check(value)) {
1919 1920 long idx;
1920 1921 if (!pylong_to_long(value, &idx)) {
1921 1922 return NULL;
1922 1923 }
1923 1924 return index_get(self, idx);
1924 1925 }
1925 1926
1926 1927 if (node_check(value, &node) == -1)
1927 1928 return NULL;
1928 1929 rev = index_find_node(self, node, 20);
1929 1930 if (rev >= -1)
1930 1931 return PyInt_FromLong(rev);
1931 1932 if (rev == -2)
1932 1933 raise_revlog_error();
1933 1934 return NULL;
1934 1935 }
1935 1936
1936 1937 /*
1937 1938 * Fully populate the radix tree.
1938 1939 */
1939 1940 static int index_populate_nt(indexObject *self)
1940 1941 {
1941 1942 int rev;
1942 1943 if (self->ntrev > 0) {
1943 1944 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1944 1945 const char *n = index_node_existing(self, rev);
1945 1946 if (n == NULL)
1946 1947 return -1;
1947 1948 if (nt_insert(&self->nt, n, rev) == -1)
1948 1949 return -1;
1949 1950 }
1950 1951 self->ntrev = -1;
1951 1952 }
1952 1953 return 0;
1953 1954 }
1954 1955
1955 1956 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
1956 1957 {
1957 1958 const char *fullnode;
1958 1959 Py_ssize_t nodelen;
1959 1960 char *node;
1960 1961 int rev, i;
1961 1962
1962 1963 if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
1963 1964 return NULL;
1964 1965
1965 1966 if (nodelen < 1) {
1966 1967 PyErr_SetString(PyExc_ValueError, "key too short");
1967 1968 return NULL;
1968 1969 }
1969 1970
1970 1971 if (nodelen > 40) {
1971 1972 PyErr_SetString(PyExc_ValueError, "key too long");
1972 1973 return NULL;
1973 1974 }
1974 1975
1975 1976 for (i = 0; i < nodelen; i++)
1976 1977 hexdigit(node, i);
1977 1978 if (PyErr_Occurred()) {
1978 1979 /* input contains non-hex characters */
1979 1980 PyErr_Clear();
1980 1981 Py_RETURN_NONE;
1981 1982 }
1982 1983
1983 1984 if (index_init_nt(self) == -1)
1984 1985 return NULL;
1985 1986 if (index_populate_nt(self) == -1)
1986 1987 return NULL;
1987 1988 rev = nt_partialmatch(&self->nt, node, nodelen);
1988 1989
1989 1990 switch (rev) {
1990 1991 case -4:
1991 1992 raise_revlog_error();
1992 1993 return NULL;
1993 1994 case -2:
1994 1995 Py_RETURN_NONE;
1995 1996 case -1:
1996 1997 return PyBytes_FromStringAndSize(nullid, 20);
1997 1998 }
1998 1999
1999 2000 fullnode = index_node_existing(self, rev);
2000 2001 if (fullnode == NULL) {
2001 2002 return NULL;
2002 2003 }
2003 2004 return PyBytes_FromStringAndSize(fullnode, 20);
2004 2005 }
2005 2006
2006 2007 static PyObject *index_shortest(indexObject *self, PyObject *args)
2007 2008 {
2008 2009 PyObject *val;
2009 2010 char *node;
2010 2011 int length;
2011 2012
2012 2013 if (!PyArg_ParseTuple(args, "O", &val))
2013 2014 return NULL;
2014 2015 if (node_check(val, &node) == -1)
2015 2016 return NULL;
2016 2017
2017 2018 self->ntlookups++;
2018 2019 if (index_init_nt(self) == -1)
2019 2020 return NULL;
2020 2021 if (index_populate_nt(self) == -1)
2021 2022 return NULL;
2022 2023 length = nt_shortest(&self->nt, node);
2023 2024 if (length == -3)
2024 2025 return NULL;
2025 2026 if (length == -2) {
2026 2027 raise_revlog_error();
2027 2028 return NULL;
2028 2029 }
2029 2030 return PyInt_FromLong(length);
2030 2031 }
2031 2032
2032 2033 static PyObject *index_m_get(indexObject *self, PyObject *args)
2033 2034 {
2034 2035 PyObject *val;
2035 2036 char *node;
2036 2037 int rev;
2037 2038
2038 2039 if (!PyArg_ParseTuple(args, "O", &val))
2039 2040 return NULL;
2040 2041 if (node_check(val, &node) == -1)
2041 2042 return NULL;
2042 2043 rev = index_find_node(self, node, 20);
2043 2044 if (rev == -3)
2044 2045 return NULL;
2045 2046 if (rev == -2)
2046 2047 Py_RETURN_NONE;
2047 2048 return PyInt_FromLong(rev);
2048 2049 }
2049 2050
2050 2051 static int index_contains(indexObject *self, PyObject *value)
2051 2052 {
2052 2053 char *node;
2053 2054
2054 2055 if (PyInt_Check(value)) {
2055 2056 long rev;
2056 2057 if (!pylong_to_long(value, &rev)) {
2057 2058 return -1;
2058 2059 }
2059 2060 return rev >= -1 && rev < index_length(self);
2060 2061 }
2061 2062
2062 2063 if (node_check(value, &node) == -1)
2063 2064 return -1;
2064 2065
2065 2066 switch (index_find_node(self, node, 20)) {
2066 2067 case -3:
2067 2068 return -1;
2068 2069 case -2:
2069 2070 return 0;
2070 2071 default:
2071 2072 return 1;
2072 2073 }
2073 2074 }
2074 2075
2075 2076 static PyObject *index_m_has_node(indexObject *self, PyObject *args)
2076 2077 {
2077 2078 int ret = index_contains(self, args);
2078 2079 if (ret < 0)
2079 2080 return NULL;
2080 2081 return PyBool_FromLong((long)ret);
2081 2082 }
2082 2083
2083 2084 static PyObject *index_m_rev(indexObject *self, PyObject *val)
2084 2085 {
2085 2086 char *node;
2086 2087 int rev;
2087 2088
2088 2089 if (node_check(val, &node) == -1)
2089 2090 return NULL;
2090 2091 rev = index_find_node(self, node, 20);
2091 2092 if (rev >= -1)
2092 2093 return PyInt_FromLong(rev);
2093 2094 if (rev == -2)
2094 2095 raise_revlog_error();
2095 2096 return NULL;
2096 2097 }
2097 2098
/* One bit per input revision; used by find_gca_candidates below to
 * track which of the starting revisions reach a given ancestor. */
typedef uint64_t bitmask;
2099 2100
2100 2101 /*
2101 2102 * Given a disjoint set of revs, return all candidates for the
2102 2103 * greatest common ancestor. In revset notation, this is the set
2103 2104 * "heads(::a and ::b and ...)"
2104 2105 */
2105 2106 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
2106 2107 int revcount)
2107 2108 {
2108 2109 const bitmask allseen = (1ull << revcount) - 1;
2109 2110 const bitmask poison = 1ull << revcount;
2110 2111 PyObject *gca = PyList_New(0);
2111 2112 int i, v, interesting;
2112 2113 int maxrev = -1;
2113 2114 bitmask sp;
2114 2115 bitmask *seen;
2115 2116
2116 2117 if (gca == NULL)
2117 2118 return PyErr_NoMemory();
2118 2119
2119 2120 for (i = 0; i < revcount; i++) {
2120 2121 if (revs[i] > maxrev)
2121 2122 maxrev = revs[i];
2122 2123 }
2123 2124
2124 2125 seen = calloc(sizeof(*seen), maxrev + 1);
2125 2126 if (seen == NULL) {
2126 2127 Py_DECREF(gca);
2127 2128 return PyErr_NoMemory();
2128 2129 }
2129 2130
2130 2131 for (i = 0; i < revcount; i++)
2131 2132 seen[revs[i]] = 1ull << i;
2132 2133
2133 2134 interesting = revcount;
2134 2135
2135 2136 for (v = maxrev; v >= 0 && interesting; v--) {
2136 2137 bitmask sv = seen[v];
2137 2138 int parents[2];
2138 2139
2139 2140 if (!sv)
2140 2141 continue;
2141 2142
2142 2143 if (sv < poison) {
2143 2144 interesting -= 1;
2144 2145 if (sv == allseen) {
2145 2146 PyObject *obj = PyInt_FromLong(v);
2146 2147 if (obj == NULL)
2147 2148 goto bail;
2148 2149 if (PyList_Append(gca, obj) == -1) {
2149 2150 Py_DECREF(obj);
2150 2151 goto bail;
2151 2152 }
2152 2153 sv |= poison;
2153 2154 for (i = 0; i < revcount; i++) {
2154 2155 if (revs[i] == v)
2155 2156 goto done;
2156 2157 }
2157 2158 }
2158 2159 }
2159 2160 if (index_get_parents(self, v, parents, maxrev) < 0)
2160 2161 goto bail;
2161 2162
2162 2163 for (i = 0; i < 2; i++) {
2163 2164 int p = parents[i];
2164 2165 if (p == -1)
2165 2166 continue;
2166 2167 sp = seen[p];
2167 2168 if (sv < poison) {
2168 2169 if (sp == 0) {
2169 2170 seen[p] = sv;
2170 2171 interesting++;
2171 2172 } else if (sp != sv)
2172 2173 seen[p] |= sv;
2173 2174 } else {
2174 2175 if (sp && sp < poison)
2175 2176 interesting--;
2176 2177 seen[p] = sv;
2177 2178 }
2178 2179 }
2179 2180 }
2180 2181
2181 2182 done:
2182 2183 free(seen);
2183 2184 return gca;
2184 2185 bail:
2185 2186 free(seen);
2186 2187 Py_XDECREF(gca);
2187 2188 return NULL;
2188 2189 }
2189 2190
/*
 * Given a disjoint set of revs, return the subset with the longest
 * path to the root.
 *
 * Each input rev gets one bit in a "seen" mask; depth[] tracks the
 * longest distance-from-input discovered so far. interesting[] is
 * indexed by seen-mask and counts how many revs currently carry each
 * mask. The revs whose masks survive (union collected in "final")
 * are the deepest.
 */
static PyObject *find_deepest(indexObject *self, PyObject *revs)
{
	const Py_ssize_t revcount = PyList_GET_SIZE(revs);
	static const Py_ssize_t capacity = 24;
	int *depth, *interesting = NULL;
	int i, j, v, ninteresting;
	PyObject *dict = NULL, *keys = NULL;
	long *seen = NULL;
	int maxrev = -1;
	long final;

	/* one bit per input rev: refuse inputs too large for a long mask */
	if (revcount > capacity) {
		PyErr_Format(PyExc_OverflowError,
		             "bitset size (%ld) > capacity (%ld)",
		             (long)revcount, (long)capacity);
		return NULL;
	}

	for (i = 0; i < revcount; i++) {
		int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
		if (n > maxrev)
			maxrev = n;
	}

	depth = calloc(sizeof(*depth), maxrev + 1);
	if (depth == NULL)
		return PyErr_NoMemory();

	seen = calloc(sizeof(*seen), maxrev + 1);
	if (seen == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* one counter per possible seen-mask value */
	interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
	if (interesting == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	if (PyList_Sort(revs) == -1)
		goto bail;

	for (i = 0; i < revcount; i++) {
		int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
		long b = 1l << i;
		depth[n] = 1;
		seen[n] = b;
		interesting[b] = 1;
	}

	/* invariant: ninteresting is the number of non-zero entries in
	 * interesting. */
	ninteresting = (int)revcount;

	for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
		int dv = depth[v];
		int parents[2];
		long sv;

		if (dv == 0)
			continue; /* v is not reachable from any input */

		sv = seen[v];
		if (index_get_parents(self, v, parents, maxrev) < 0)
			goto bail;

		for (i = 0; i < 2; i++) {
			int p = parents[i];
			long sp;
			int dp;

			if (p == -1)
				continue;

			dp = depth[p];
			sp = seen[p];
			if (dp <= dv) {
				/* found a longer path to p: adopt v's mask */
				depth[p] = dv + 1;
				if (sp != sv) {
					interesting[sv] += 1;
					seen[p] = sv;
					if (sp) {
						interesting[sp] -= 1;
						if (interesting[sp] == 0)
							ninteresting -= 1;
					}
				}
			} else if (dv == dp - 1) {
				/* equal-length path: merge the masks */
				long nsp = sp | sv;
				if (nsp == sp)
					continue;
				seen[p] = nsp;
				interesting[sp] -= 1;
				if (interesting[sp] == 0)
					ninteresting -= 1;
				if (interesting[nsp] == 0)
					ninteresting += 1;
				interesting[nsp] += 1;
			}
		}
		interesting[sv] -= 1;
		if (interesting[sv] == 0)
			ninteresting -= 1;
	}

	/* union of the masks still alive; NOTE(review): the bound
	 * (2 << revcount) exceeds the allocation (1 << revcount), but the
	 * "j > 0" guard exits before out-of-range entries are read as long
	 * as the ninteresting accounting holds — confirm upstream */
	final = 0;
	j = ninteresting;
	for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
		if (interesting[i] == 0)
			continue;
		final |= i;
		j -= 1;
	}
	if (final == 0) {
		keys = PyList_New(0);
		goto bail;
	}

	/* collect the winning input revs; a dict is used as a set */
	dict = PyDict_New();
	if (dict == NULL)
		goto bail;

	for (i = 0; i < revcount; i++) {
		PyObject *key;

		if ((final & (1 << i)) == 0)
			continue;

		key = PyList_GET_ITEM(revs, i);
		Py_INCREF(key);
		Py_INCREF(Py_None);
		if (PyDict_SetItem(dict, key, Py_None) == -1) {
			Py_DECREF(key);
			Py_DECREF(Py_None);
			goto bail;
		}
	}

	keys = PyDict_Keys(dict);

bail:
	free(depth);
	free(seen);
	free(interesting);
	Py_XDECREF(dict);

	return keys;
}
2343 2344
/*
 * Given a (possibly overlapping) set of revs, return all the
 * common ancestors heads: heads(::args[0] and ::a[1] and ...)
 *
 * Arguments must all be ints; nullrev (-1) anywhere yields an empty
 * result. Duplicates are filtered out before the GCA computation.
 */
static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
{
	PyObject *ret = NULL;
	Py_ssize_t argcount, i, len;
	bitmask repeat = 0;
	int revcount = 0;
	int *revs;

	argcount = PySequence_Length(args);
	revs = PyMem_Malloc(argcount * sizeof(*revs));
	if (argcount > 0 && revs == NULL)
		return PyErr_NoMemory();
	len = index_length(self);

	for (i = 0; i < argcount; i++) {
		static const int capacity = 24;
		PyObject *obj = PySequence_GetItem(args, i);
		bitmask x;
		long val;

		if (!PyInt_Check(obj)) {
			PyErr_SetString(PyExc_TypeError,
			                "arguments must all be ints");
			Py_DECREF(obj);
			goto bail;
		}
		val = PyInt_AsLong(obj);
		Py_DECREF(obj);
		if (val == -1) {
			/* nullrev: the only common ancestor is nullrev
			 * itself, reported as an empty head list */
			ret = PyList_New(0);
			goto done;
		}
		if (val < 0 || val >= len) {
			PyErr_SetString(PyExc_IndexError, "index out of range");
			goto bail;
		}
		/* this cheesy bloom filter lets us avoid some more
		 * expensive duplicate checks in the common set-is-disjoint
		 * case */
		x = 1ull << (val & 0x3f);
		if (repeat & x) {
			int k;
			for (k = 0; k < revcount; k++) {
				if (val == revs[k])
					goto duplicate;
			}
		} else
			repeat |= x;
		if (revcount >= capacity) {
			PyErr_Format(PyExc_OverflowError,
			             "bitset size (%d) > capacity (%d)",
			             revcount, capacity);
			goto bail;
		}
		revs[revcount++] = (int)val;
	duplicate:;
	}

	if (revcount == 0) {
		ret = PyList_New(0);
		goto done;
	}
	if (revcount == 1) {
		/* fast path: a single rev is its own head */
		PyObject *obj;
		ret = PyList_New(1);
		if (ret == NULL)
			goto bail;
		obj = PyInt_FromLong(revs[0]);
		if (obj == NULL)
			goto bail;
		PyList_SET_ITEM(ret, 0, obj);
		goto done;
	}

	ret = find_gca_candidates(self, revs, revcount);
	if (ret == NULL)
		goto bail;

done:
	PyMem_Free(revs);
	return ret;

bail:
	PyMem_Free(revs);
	Py_XDECREF(ret);
	return NULL;
}
2435 2436
2436 2437 /*
2437 2438 * Given a (possibly overlapping) set of revs, return the greatest
2438 2439 * common ancestors: those with the longest path to the root.
2439 2440 */
2440 2441 static PyObject *index_ancestors(indexObject *self, PyObject *args)
2441 2442 {
2442 2443 PyObject *ret;
2443 2444 PyObject *gca = index_commonancestorsheads(self, args);
2444 2445 if (gca == NULL)
2445 2446 return NULL;
2446 2447
2447 2448 if (PyList_GET_SIZE(gca) <= 1) {
2448 2449 return gca;
2449 2450 }
2450 2451
2451 2452 ret = find_deepest(self, gca);
2452 2453 Py_DECREF(gca);
2453 2454 return ret;
2454 2455 }
2455 2456
2456 2457 /*
2457 2458 * Invalidate any trie entries introduced by added revs.
2458 2459 */
2459 2460 static void index_invalidate_added(indexObject *self, Py_ssize_t start)
2460 2461 {
2461 2462 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
2462 2463
2463 2464 for (i = start; i < len; i++) {
2464 2465 PyObject *tuple = PyList_GET_ITEM(self->added, i);
2465 2466 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
2466 2467
2467 2468 nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
2468 2469 }
2469 2470
2470 2471 if (start == 0)
2471 2472 Py_CLEAR(self->added);
2472 2473 }
2473 2474
/*
 * Delete a numeric range of revs, which must be at the end of the
 * range.
 *
 * Only step-1 slices reaching exactly to the end of the index are
 * supported; anything else raises ValueError/IndexError. The node
 * trie, the entry cache and the headrevs cache are invalidated for
 * the truncated revs.
 */
static int index_slice_del(indexObject *self, PyObject *item)
{
	Py_ssize_t start, stop, step, slicelength;
	Py_ssize_t length = index_length(self) + 1;
	int ret = 0;

	/* Argument changed from PySliceObject* to PyObject* in Python 3. */
#ifdef IS_PY3K
	if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
	                         &slicelength) < 0)
#else
	if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
	                         &step, &slicelength) < 0)
#endif
		return -1;

	if (slicelength <= 0)
		return 0;

	if ((step < 0 && start < stop) || (step > 0 && start > stop))
		stop = start;

	/* normalize a negative step to a forward walk over the same revs */
	if (step < 0) {
		stop = start + 1;
		start = stop + step * (slicelength - 1) - 1;
		step = -step;
	}

	if (step != 1) {
		PyErr_SetString(PyExc_ValueError,
		                "revlog index delete requires step size of 1");
		return -1;
	}

	/* only deletion of a tail of the index is supported */
	if (stop != length - 1) {
		PyErr_SetString(PyExc_IndexError,
		                "revlog index deletion indices are invalid");
		return -1;
	}

	/* case 1: the deletion reaches into the on-disk portion */
	if (start < self->length) {
		if (self->ntinitialized) {
			Py_ssize_t i;

			/* drop every truncated node from the trie */
			for (i = start; i < self->length; i++) {
				const char *node = index_node_existing(self, i);
				if (node == NULL)
					return -1;

				nt_delete_node(&self->nt, node);
			}
			if (self->added)
				index_invalidate_added(self, 0);
			if (self->ntrev > start)
				self->ntrev = (int)start;
		} else if (self->added) {
			Py_CLEAR(self->added);
		}

		self->length = start;
		if (start < self->raw_length) {
			/* release cached entry tuples for truncated revs */
			if (self->cache) {
				Py_ssize_t i;
				for (i = start; i < self->raw_length; i++)
					Py_CLEAR(self->cache[i]);
			}
			self->raw_length = start;
		}
		goto done;
	}

	/* case 2: only appended (in-memory) entries are deleted */
	if (self->ntinitialized) {
		index_invalidate_added(self, start - self->length);
		if (self->ntrev > start)
			self->ntrev = (int)start;
	}
	if (self->added)
		ret = PyList_SetSlice(self->added, start - self->length,
		                      PyList_GET_SIZE(self->added), NULL);
done:
	Py_CLEAR(self->headrevs);
	return ret;
}
2561 2562
2562 2563 /*
2563 2564 * Supported ops:
2564 2565 *
2565 2566 * slice deletion
2566 2567 * string assignment (extend node->rev mapping)
2567 2568 * string deletion (shrink node->rev mapping)
2568 2569 */
2569 2570 static int index_assign_subscript(indexObject *self, PyObject *item,
2570 2571 PyObject *value)
2571 2572 {
2572 2573 char *node;
2573 2574 long rev;
2574 2575
2575 2576 if (PySlice_Check(item) && value == NULL)
2576 2577 return index_slice_del(self, item);
2577 2578
2578 2579 if (node_check(item, &node) == -1)
2579 2580 return -1;
2580 2581
2581 2582 if (value == NULL)
2582 2583 return self->ntinitialized ? nt_delete_node(&self->nt, node)
2583 2584 : 0;
2584 2585 rev = PyInt_AsLong(value);
2585 2586 if (rev > INT_MAX || rev < 0) {
2586 2587 if (!PyErr_Occurred())
2587 2588 PyErr_SetString(PyExc_ValueError, "rev out of range");
2588 2589 return -1;
2589 2590 }
2590 2591
2591 2592 if (index_init_nt(self) == -1)
2592 2593 return -1;
2593 2594 return nt_insert(&self->nt, node, (int)rev);
2594 2595 }
2595 2596
2596 2597 /*
2597 2598 * Find all RevlogNG entries in an index that has inline data. Update
2598 2599 * the optional "offsets" table with those entries.
2599 2600 */
2600 2601 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
2601 2602 {
2602 2603 const char *data = (const char *)self->buf.buf;
2603 2604 Py_ssize_t pos = 0;
2604 2605 Py_ssize_t end = self->buf.len;
2605 2606 long incr = v1_hdrsize;
2606 2607 Py_ssize_t len = 0;
2607 2608
2608 2609 while (pos + v1_hdrsize <= end && pos >= 0) {
2609 2610 uint32_t comp_len;
2610 2611 /* 3rd element of header is length of compressed inline data */
2611 2612 comp_len = getbe32(data + pos + 8);
2612 2613 incr = v1_hdrsize + comp_len;
2613 2614 if (offsets)
2614 2615 offsets[len] = data + pos;
2615 2616 len++;
2616 2617 pos += incr;
2617 2618 }
2618 2619
2619 2620 if (pos != end) {
2620 2621 if (!PyErr_Occurred())
2621 2622 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2622 2623 return -1;
2623 2624 }
2624 2625
2625 2626 return len;
2626 2627 }
2627 2628
/*
 * __init__(data, inlined): parse a revlog index from a buffer.
 *
 * "data" must support the buffer protocol; "inlined" tells whether
 * revision data is interleaved with the index records, in which case
 * record boundaries have to be discovered by scanning.
 */
static int index_init(indexObject *self, PyObject *args)
{
	PyObject *data_obj, *inlined_obj;
	Py_ssize_t size;

	/* Initialize before argument-checking to avoid index_dealloc() crash.
	 */
	self->raw_length = 0;
	self->added = NULL;
	self->cache = NULL;
	self->data = NULL;
	memset(&self->buf, 0, sizeof(self->buf));
	self->headrevs = NULL;
	self->filteredrevs = Py_None;
	Py_INCREF(Py_None);
	self->ntinitialized = 0;
	self->offsets = NULL;

	if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
		return -1;
	if (!PyObject_CheckBuffer(data_obj)) {
		PyErr_SetString(PyExc_TypeError,
		                "data does not support buffer interface");
		return -1;
	}

	if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
		return -1;
	size = self->buf.len;

	self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
	self->data = data_obj;

	self->ntlookups = self->ntmisses = 0;
	self->ntrev = -1;
	Py_INCREF(self->data);

	if (self->inlined) {
		/* inline format: scan for record boundaries */
		Py_ssize_t len = inline_scan(self, NULL);
		if (len == -1)
			goto bail;
		self->raw_length = len;
		self->length = len;
	} else {
		/* plain format: fixed-size records */
		if (size % v1_hdrsize) {
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
			goto bail;
		}
		self->raw_length = size / v1_hdrsize;
		self->length = self->raw_length;
	}

	return 0;
bail:
	return -1;
}
2684 2685
2685 2686 static PyObject *index_nodemap(indexObject *self)
2686 2687 {
2687 2688 Py_INCREF(self);
2688 2689 return (PyObject *)self;
2689 2690 }
2690 2691
2691 2692 static void _index_clearcaches(indexObject *self)
2692 2693 {
2693 2694 if (self->cache) {
2694 2695 Py_ssize_t i;
2695 2696
2696 2697 for (i = 0; i < self->raw_length; i++)
2697 2698 Py_CLEAR(self->cache[i]);
2698 2699 free(self->cache);
2699 2700 self->cache = NULL;
2700 2701 }
2701 2702 if (self->offsets) {
2702 2703 PyMem_Free((void *)self->offsets);
2703 2704 self->offsets = NULL;
2704 2705 }
2705 2706 if (self->ntinitialized) {
2706 2707 nt_dealloc(&self->nt);
2707 2708 }
2708 2709 self->ntinitialized = 0;
2709 2710 Py_CLEAR(self->headrevs);
2710 2711 }
2711 2712
2712 2713 static PyObject *index_clearcaches(indexObject *self)
2713 2714 {
2714 2715 _index_clearcaches(self);
2715 2716 self->ntrev = -1;
2716 2717 self->ntlookups = self->ntmisses = 0;
2717 2718 Py_RETURN_NONE;
2718 2719 }
2719 2720
/*
 * Tear down an index object: caches first, then the data buffer, then
 * the remaining owned references. Must tolerate a partially
 * initialized object, since index_init() zeroes every field before it
 * validates its arguments.
 */
static void index_dealloc(indexObject *self)
{
	_index_clearcaches(self);
	Py_XDECREF(self->filteredrevs);
	if (self->buf.buf) {
		PyBuffer_Release(&self->buf);
		memset(&self->buf, 0, sizeof(self->buf));
	}
	Py_XDECREF(self->data);
	Py_XDECREF(self->added);
	PyObject_Del(self);
}
2732 2733
/* sequence protocol: len(index), index[rev] and "node in index" */
static PySequenceMethods index_sequence_methods = {
    (lenfunc)index_length,      /* sq_length */
    0,                          /* sq_concat */
    0,                          /* sq_repeat */
    (ssizeargfunc)index_get,    /* sq_item */
    0,                          /* sq_slice */
    0,                          /* sq_ass_item */
    0,                          /* sq_ass_slice */
    (objobjproc)index_contains, /* sq_contains */
};

/* mapping protocol: index[node], assignment and slice deletion */
static PyMappingMethods index_mapping_methods = {
    (lenfunc)index_length,                 /* mp_length */
    (binaryfunc)index_getitem,             /* mp_subscript */
    (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
};
2749 2750
2750 2751 static PyMethodDef index_methods[] = {
2751 2752 {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
2752 2753 "return the gca set of the given revs"},
2753 2754 {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
2754 2755 METH_VARARGS,
2755 2756 "return the heads of the common ancestors of the given revs"},
2756 2757 {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
2757 2758 "clear the index caches"},
2758 2759 {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
2759 2760 {"get_rev", (PyCFunction)index_m_get, METH_VARARGS,
2760 2761 "return `rev` associated with a node or None"},
2761 2762 {"has_node", (PyCFunction)index_m_has_node, METH_O,
2762 2763 "return True if the node exist in the index"},
2763 2764 {"rev", (PyCFunction)index_m_rev, METH_O,
2764 2765 "return `rev` associated with a node or raise RevlogError"},
2765 2766 {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
2766 2767 "compute phases"},
2767 2768 {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
2768 2769 "reachableroots"},
2769 2770 {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
2770 2771 "get head revisions"}, /* Can do filtering since 3.2 */
2771 2772 {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
2772 2773 "get filtered head revisions"}, /* Can always do filtering */
2773 2774 {"issnapshot", (PyCFunction)index_issnapshot, METH_O,
2774 2775 "True if the object is a snapshot"},
2775 2776 {"findsnapshots", (PyCFunction)index_findsnapshots, METH_VARARGS,
2776 2777 "Gather snapshot data in a cache dict"},
2777 2778 {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
2778 2779 "determine revisions with deltas to reconstruct fulltext"},
2779 2780 {"slicechunktodensity", (PyCFunction)index_slicechunktodensity,
2780 2781 METH_VARARGS, "determine revisions with deltas to reconstruct fulltext"},
2781 2782 {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
2782 2783 {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
2783 2784 "match a potentially ambiguous node ID"},
2784 2785 {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
2785 2786 "find length of shortest hex nodeid of a binary ID"},
2786 2787 {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
2787 2788 {NULL} /* Sentinel */
2788 2789 };
2789 2790
2790 2791 static PyGetSetDef index_getset[] = {
2791 2792 {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
2792 2793 {NULL} /* Sentinel */
2793 2794 };
2794 2795
/* type object for parsers.index; tp_new is filled in by
 * revlog_module_init() before PyType_Ready() */
PyTypeObject HgRevlogIndex_Type = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.index",               /* tp_name */
    sizeof(indexObject),           /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)index_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    &index_sequence_methods,       /* tp_as_sequence */
    &index_mapping_methods,        /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "revlog index",                /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    index_methods,                 /* tp_methods */
    0,                             /* tp_members */
    index_getset,                  /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)index_init,          /* tp_init */
    0,                             /* tp_alloc */
};
2834 2835
/*
 * returns a tuple of the form (index, index, cache) with elements as
 * follows:
 *
 * index: an index object that lazily parses RevlogNG records
 * cache: if data is inlined, a tuple (0, index_file_content), else None
 *   index_file_content could be a string, or a buffer
 *
 * added complications are for backwards compatibility
 */
PyObject *parse_index2(PyObject *self, PyObject *args)
{
	PyObject *tuple = NULL, *cache = NULL;
	indexObject *idx;
	int ret;

	idx = PyObject_New(indexObject, &HgRevlogIndex_Type);
	if (idx == NULL)
		goto bail;

	/* index_init() zeroes all fields first, so bailing here and
	 * letting index_dealloc() run is safe even on failure */
	ret = index_init(idx, args);
	if (ret == -1)
		goto bail;

	if (idx->inlined) {
		cache = Py_BuildValue("iO", 0, idx->data);
		if (cache == NULL)
			goto bail;
	} else {
		cache = Py_None;
		Py_INCREF(cache);
	}

	/* "N" passes ownership of idx and cache to the tuple */
	tuple = Py_BuildValue("NN", idx, cache);
	if (!tuple)
		goto bail;
	return tuple;

bail:
	Py_XDECREF(idx);
	Py_XDECREF(cache);
	Py_XDECREF(tuple);
	return NULL;
}
2879 2880
2880 2881 #ifdef WITH_RUST
2881 2882
/* rustlazyancestors: iteration over ancestors implemented in Rust
 *
 * This class holds a reference to an index and to the Rust iterator.
 */
typedef struct rustlazyancestorsObjectStruct rustlazyancestorsObject;

struct rustlazyancestorsObjectStruct {
	PyObject_HEAD
	/* Type-specific fields go here. */
	indexObject *index; /* Ref kept to avoid GC'ing the index */
	void *iter;         /* Rust iterator */
};

/* FFI exposed from Rust code */
rustlazyancestorsObject *rustlazyancestors_init(indexObject *index,
                                                /* intrevs vector */
                                                Py_ssize_t initrevslen,
                                                long *initrevs, long stoprev,
                                                int inclusive);
void rustlazyancestors_drop(rustlazyancestorsObject *self);
/* returns the next rev, or -1 when the iterator is exhausted */
int rustlazyancestors_next(rustlazyancestorsObject *self);
int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev);
2904 2905
2905 2906 /* CPython instance methods */
2906 2907 static int rustla_init(rustlazyancestorsObject *self, PyObject *args)
2907 2908 {
2908 2909 PyObject *initrevsarg = NULL;
2909 2910 PyObject *inclusivearg = NULL;
2910 2911 long stoprev = 0;
2911 2912 long *initrevs = NULL;
2912 2913 int inclusive = 0;
2913 2914 Py_ssize_t i;
2914 2915
2915 2916 indexObject *index;
2916 2917 if (!PyArg_ParseTuple(args, "O!O!lO!", &HgRevlogIndex_Type, &index,
2917 2918 &PyList_Type, &initrevsarg, &stoprev,
2918 2919 &PyBool_Type, &inclusivearg))
2919 2920 return -1;
2920 2921
2921 2922 Py_INCREF(index);
2922 2923 self->index = index;
2923 2924
2924 2925 if (inclusivearg == Py_True)
2925 2926 inclusive = 1;
2926 2927
2927 2928 Py_ssize_t linit = PyList_GET_SIZE(initrevsarg);
2928 2929
2929 2930 initrevs = (long *)calloc(linit, sizeof(long));
2930 2931
2931 2932 if (initrevs == NULL) {
2932 2933 PyErr_NoMemory();
2933 2934 goto bail;
2934 2935 }
2935 2936
2936 2937 for (i = 0; i < linit; i++) {
2937 2938 initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i));
2938 2939 }
2939 2940 if (PyErr_Occurred())
2940 2941 goto bail;
2941 2942
2942 2943 self->iter =
2943 2944 rustlazyancestors_init(index, linit, initrevs, stoprev, inclusive);
2944 2945 if (self->iter == NULL) {
2945 2946 /* if this is because of GraphError::ParentOutOfRange
2946 2947 * HgRevlogIndex_GetParents() has already set the proper
2947 2948 * exception */
2948 2949 goto bail;
2949 2950 }
2950 2951
2951 2952 free(initrevs);
2952 2953 return 0;
2953 2954
2954 2955 bail:
2955 2956 free(initrevs);
2956 2957 return -1;
2957 2958 };
2958 2959
/* Release the index reference and the Rust-side iterator, if any. */
static void rustla_dealloc(rustlazyancestorsObject *self)
{
	Py_XDECREF(self->index);
	if (self->iter != NULL) { /* can happen if rustla_init failed */
		rustlazyancestors_drop(self->iter);
	}
	PyObject_Del(self);
}
2967 2968
2968 2969 static PyObject *rustla_next(rustlazyancestorsObject *self)
2969 2970 {
2970 2971 int res = rustlazyancestors_next(self->iter);
2971 2972 if (res == -1) {
2972 2973 /* Setting an explicit exception seems unnecessary
2973 2974 * as examples from Python source code (Objects/rangeobjets.c
2974 2975 * and Modules/_io/stringio.c) seem to demonstrate.
2975 2976 */
2976 2977 return NULL;
2977 2978 }
2978 2979 return PyInt_FromLong(res);
2979 2980 }
2980 2981
2981 2982 static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev)
2982 2983 {
2983 2984 long lrev;
2984 2985 if (!pylong_to_long(rev, &lrev)) {
2985 2986 PyErr_Clear();
2986 2987 return 0;
2987 2988 }
2988 2989 return rustlazyancestors_contains(self->iter, lrev);
2989 2990 }
2990 2991
/* only "rev in iterator" is supported on the sequence protocol */
static PySequenceMethods rustla_sequence_methods = {
    0,                           /* sq_length */
    0,                           /* sq_concat */
    0,                           /* sq_repeat */
    0,                           /* sq_item */
    0,                           /* sq_slice */
    0,                           /* sq_ass_item */
    0,                           /* sq_ass_slice */
    (objobjproc)rustla_contains, /* sq_contains */
};

/* type object for parsers.rustlazyancestors; tp_new is filled in by
 * revlog_module_init() before PyType_Ready() */
static PyTypeObject rustlazyancestorsType = {
    PyVarObject_HEAD_INIT(NULL, 0)                  /* header */
    "parsers.rustlazyancestors",                    /* tp_name */
    sizeof(rustlazyancestorsObject),                /* tp_basicsize */
    0,                                              /* tp_itemsize */
    (destructor)rustla_dealloc,                     /* tp_dealloc */
    0,                                              /* tp_print */
    0,                                              /* tp_getattr */
    0,                                              /* tp_setattr */
    0,                                              /* tp_compare */
    0,                                              /* tp_repr */
    0,                                              /* tp_as_number */
    &rustla_sequence_methods,                       /* tp_as_sequence */
    0,                                              /* tp_as_mapping */
    0,                                              /* tp_hash */
    0,                                              /* tp_call */
    0,                                              /* tp_str */
    0,                                              /* tp_getattro */
    0,                                              /* tp_setattro */
    0,                                              /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,                             /* tp_flags */
    "Iterator over ancestors, implemented in Rust", /* tp_doc */
    0,                                              /* tp_traverse */
    0,                                              /* tp_clear */
    0,                                              /* tp_richcompare */
    0,                                              /* tp_weaklistoffset */
    0,                                              /* tp_iter */
    (iternextfunc)rustla_next,                      /* tp_iternext */
    0,                                              /* tp_methods */
    0,                                              /* tp_members */
    0,                                              /* tp_getset */
    0,                                              /* tp_base */
    0,                                              /* tp_dict */
    0,                                              /* tp_descr_get */
    0,                                              /* tp_descr_set */
    0,                                              /* tp_dictoffset */
    (initproc)rustla_init,                          /* tp_init */
    0,                                              /* tp_alloc */
};
3041 3042 #endif /* WITH_RUST */
3042 3043
/* C API exported to other extensions through a PyCapsule */
static Revlog_CAPI CAPI = {
    /* increment the abi_version field upon each change in the Revlog_CAPI
       struct or in the ABI of the listed functions */
    1,
    HgRevlogIndex_GetParents,
};
3049 3050
/*
 * Module initialization: register the index and nodetree types, the
 * shared nullentry tuple and the C API capsule with "mod". Failures
 * simply return early; the caller does not inspect a status.
 */
void revlog_module_init(PyObject *mod)
{
	PyObject *caps = NULL;
	HgRevlogIndex_Type.tp_new = PyType_GenericNew;
	if (PyType_Ready(&HgRevlogIndex_Type) < 0)
		return;
	Py_INCREF(&HgRevlogIndex_Type);
	/* PyModule_AddObject steals the reference added above on success */
	PyModule_AddObject(mod, "index", (PyObject *)&HgRevlogIndex_Type);

	nodetreeType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&nodetreeType) < 0)
		return;
	Py_INCREF(&nodetreeType);
	PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);

	if (!nullentry) {
		/* shared, immortal tuple describing the null revision */
		nullentry =
		    Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0, -1,
		                  -1, -1, -1, nullid, (Py_ssize_t)20);
	}
	if (nullentry)
		PyObject_GC_UnTrack(nullentry);

	caps = PyCapsule_New(&CAPI, "mercurial.cext.parsers.revlog_CAPI", NULL);
	if (caps != NULL)
		PyModule_AddObject(mod, "revlog_CAPI", caps);

#ifdef WITH_RUST
	rustlazyancestorsType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&rustlazyancestorsType) < 0)
		return;
	Py_INCREF(&rustlazyancestorsType);
	PyModule_AddObject(mod, "rustlazyancestors",
	                   (PyObject *)&rustlazyancestorsType);
#endif
}
General Comments 0
You need to be logged in to leave comments. Login now