##// END OF EJS Templates
cext: make revlog.c PY_SSIZE_T_CLEAN...
Gregory Szorc -
r42234:b01bbb8f default
parent child Browse files
Show More
@@ -1,3039 +1,3040 b''
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 #define PY_SSIZE_T_CLEAN
10 11 #include <Python.h>
11 12 #include <assert.h>
12 13 #include <ctype.h>
13 14 #include <limits.h>
14 15 #include <stddef.h>
15 16 #include <stdlib.h>
16 17 #include <string.h>
17 18
18 19 #include "bitmanipulation.h"
19 20 #include "charencode.h"
20 21 #include "revlog.h"
21 22 #include "util.h"
22 23
23 24 #ifdef IS_PY3K
24 25 /* The mapping of Python types is meant to be temporary to get Python
25 26 * 3 to compile. We should remove this once Python 3 support is fully
26 27 * supported and proper types are used in the extensions themselves. */
27 28 #define PyInt_Check PyLong_Check
28 29 #define PyInt_FromLong PyLong_FromLong
29 30 #define PyInt_FromSsize_t PyLong_FromSsize_t
30 31 #define PyInt_AsLong PyLong_AsLong
31 32 #endif
32 33
33 34 typedef struct indexObjectStruct indexObject;
34 35
35 36 typedef struct {
36 37 int children[16];
37 38 } nodetreenode;
38 39
39 40 /*
40 41 * A base-16 trie for fast node->rev mapping.
41 42 *
42 43 * Positive value is index of the next node in the trie
43 44 * Negative value is a leaf: -(rev + 2)
44 45 * Zero is empty
45 46 */
46 47 typedef struct {
47 48 indexObject *index;
48 49 nodetreenode *nodes;
49 50 unsigned length; /* # nodes in use */
50 51 unsigned capacity; /* # nodes allocated */
51 52 int depth; /* maximum depth of tree */
52 53 int splits; /* # splits performed */
53 54 } nodetree;
54 55
55 56 typedef struct {
56 57 PyObject_HEAD /* ; */
57 58 nodetree nt;
58 59 } nodetreeObject;
59 60
60 61 /*
61 62 * This class has two behaviors.
62 63 *
63 64 * When used in a list-like way (with integer keys), we decode an
64 65 * entry in a RevlogNG index file on demand. Our last entry is a
65 66 * sentinel, always a nullid. We have limited support for
66 67 * integer-keyed insert and delete, only at elements right before the
67 68 * sentinel.
68 69 *
69 70 * With string keys, we lazily perform a reverse mapping from node to
70 71 * rev, using a base-16 trie.
71 72 */
72 73 struct indexObjectStruct {
73 74 PyObject_HEAD
74 75 /* Type-specific fields go here. */
75 76 PyObject *data; /* raw bytes of index */
76 77 Py_buffer buf; /* buffer of data */
77 78 PyObject **cache; /* cached tuples */
78 79 const char **offsets; /* populated on demand */
79 80 Py_ssize_t raw_length; /* original number of elements */
80 81 Py_ssize_t length; /* current number of elements */
81 82 PyObject *added; /* populated on demand */
82 83 PyObject *headrevs; /* cache, invalidated on changes */
83 84 PyObject *filteredrevs; /* filtered revs set */
84 85 nodetree nt; /* base-16 trie */
85 86 int ntinitialized; /* 0 or 1 */
86 87 int ntrev; /* last rev scanned */
87 88 int ntlookups; /* # lookups */
88 89 int ntmisses; /* # lookups that miss the cache */
89 90 int inlined;
90 91 };
91 92
92 93 static Py_ssize_t index_length(const indexObject *self)
93 94 {
94 95 if (self->added == NULL)
95 96 return self->length;
96 97 return self->length + PyList_GET_SIZE(self->added);
97 98 }
98 99
99 100 static PyObject *nullentry = NULL;
100 101 static const char nullid[20] = {0};
101 102 static const Py_ssize_t nullrev = -1;
102 103
103 104 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
104 105
105 106 #if LONG_MAX == 0x7fffffffL
106 107 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
107 108 #else
108 109 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
109 110 #endif
110 111
111 112 /* A RevlogNG v1 index entry is 64 bytes long. */
112 113 static const long v1_hdrsize = 64;
113 114
114 115 static void raise_revlog_error(void)
115 116 {
116 117 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
117 118
118 119 mod = PyImport_ImportModule("mercurial.error");
119 120 if (mod == NULL) {
120 121 goto cleanup;
121 122 }
122 123
123 124 dict = PyModule_GetDict(mod);
124 125 if (dict == NULL) {
125 126 goto cleanup;
126 127 }
127 128 Py_INCREF(dict);
128 129
129 130 errclass = PyDict_GetItemString(dict, "RevlogError");
130 131 if (errclass == NULL) {
131 132 PyErr_SetString(PyExc_SystemError,
132 133 "could not find RevlogError");
133 134 goto cleanup;
134 135 }
135 136
136 137 /* value of exception is ignored by callers */
137 138 PyErr_SetString(errclass, "RevlogError");
138 139
139 140 cleanup:
140 141 Py_XDECREF(dict);
141 142 Py_XDECREF(mod);
142 143 }
143 144
144 145 /*
145 146 * Return a pointer to the beginning of a RevlogNG record.
146 147 */
147 148 static const char *index_deref(indexObject *self, Py_ssize_t pos)
148 149 {
149 150 if (self->inlined && pos > 0) {
150 151 if (self->offsets == NULL) {
151 152 self->offsets = PyMem_Malloc(self->raw_length *
152 153 sizeof(*self->offsets));
153 154 if (self->offsets == NULL)
154 155 return (const char *)PyErr_NoMemory();
155 156 inline_scan(self, self->offsets);
156 157 }
157 158 return self->offsets[pos];
158 159 }
159 160
160 161 return (const char *)(self->buf.buf) + pos * v1_hdrsize;
161 162 }
162 163
163 164 /*
164 165 * Get parents of the given rev.
165 166 *
166 167 * The specified rev must be valid and must not be nullrev. A returned
167 168 * parent revision may be nullrev, but is guaranteed to be in valid range.
168 169 */
169 170 static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
170 171 int maxrev)
171 172 {
172 173 if (rev >= self->length) {
173 174 long tmp;
174 175 PyObject *tuple =
175 176 PyList_GET_ITEM(self->added, rev - self->length);
176 177 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 5), &tmp)) {
177 178 return -1;
178 179 }
179 180 ps[0] = (int)tmp;
180 181 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 6), &tmp)) {
181 182 return -1;
182 183 }
183 184 ps[1] = (int)tmp;
184 185 } else {
185 186 const char *data = index_deref(self, rev);
186 187 ps[0] = getbe32(data + 24);
187 188 ps[1] = getbe32(data + 28);
188 189 }
189 190 /* If index file is corrupted, ps[] may point to invalid revisions. So
190 191 * there is a risk of buffer overflow to trust them unconditionally. */
191 192 if (ps[0] < -1 || ps[0] > maxrev || ps[1] < -1 || ps[1] > maxrev) {
192 193 PyErr_SetString(PyExc_ValueError, "parent out of range");
193 194 return -1;
194 195 }
195 196 return 0;
196 197 }
197 198
198 199 /*
199 200 * Get parents of the given rev.
200 201 *
201 202 * If the specified rev is out of range, IndexError will be raised. If the
202 203 * revlog entry is corrupted, ValueError may be raised.
203 204 *
204 205 * Returns 0 on success or -1 on failure.
205 206 */
206 207 int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps)
207 208 {
208 209 int tiprev;
209 210 if (!op || !HgRevlogIndex_Check(op) || !ps) {
210 211 PyErr_BadInternalCall();
211 212 return -1;
212 213 }
213 214 tiprev = (int)index_length((indexObject *)op) - 1;
214 215 if (rev < -1 || rev > tiprev) {
215 216 PyErr_Format(PyExc_IndexError, "rev out of range: %d", rev);
216 217 return -1;
217 218 } else if (rev == -1) {
218 219 ps[0] = ps[1] = -1;
219 220 return 0;
220 221 } else {
221 222 return index_get_parents((indexObject *)op, rev, ps, tiprev);
222 223 }
223 224 }
224 225
225 226 static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
226 227 {
227 228 uint64_t offset;
228 229 if (rev == nullrev) {
229 230 return 0;
230 231 }
231 232 if (rev >= self->length) {
232 233 PyObject *tuple;
233 234 PyObject *pylong;
234 235 PY_LONG_LONG tmp;
235 236 tuple = PyList_GET_ITEM(self->added, rev - self->length);
236 237 pylong = PyTuple_GET_ITEM(tuple, 0);
237 238 tmp = PyLong_AsLongLong(pylong);
238 239 if (tmp == -1 && PyErr_Occurred()) {
239 240 return -1;
240 241 }
241 242 if (tmp < 0) {
242 243 PyErr_Format(PyExc_OverflowError,
243 244 "revlog entry size out of bound (%lld)",
244 245 (long long)tmp);
245 246 return -1;
246 247 }
247 248 offset = (uint64_t)tmp;
248 249 } else {
249 250 const char *data = index_deref(self, rev);
250 251 offset = getbe32(data + 4);
251 252 if (rev == 0) {
252 253 /* mask out version number for the first entry */
253 254 offset &= 0xFFFF;
254 255 } else {
255 256 uint32_t offset_high = getbe32(data);
256 257 offset |= ((uint64_t)offset_high) << 32;
257 258 }
258 259 }
259 260 return (int64_t)(offset >> 16);
260 261 }
261 262
262 263 static inline int index_get_length(indexObject *self, Py_ssize_t rev)
263 264 {
264 265 if (rev == nullrev) {
265 266 return 0;
266 267 }
267 268 if (rev >= self->length) {
268 269 PyObject *tuple;
269 270 PyObject *pylong;
270 271 long ret;
271 272 tuple = PyList_GET_ITEM(self->added, rev - self->length);
272 273 pylong = PyTuple_GET_ITEM(tuple, 1);
273 274 ret = PyInt_AsLong(pylong);
274 275 if (ret == -1 && PyErr_Occurred()) {
275 276 return -1;
276 277 }
277 278 if (ret < 0 || ret > (long)INT_MAX) {
278 279 PyErr_Format(PyExc_OverflowError,
279 280 "revlog entry size out of bound (%ld)",
280 281 ret);
281 282 return -1;
282 283 }
283 284 return (int)ret;
284 285 } else {
285 286 const char *data = index_deref(self, rev);
286 287 int tmp = (int)getbe32(data + 8);
287 288 if (tmp < 0) {
288 289 PyErr_Format(PyExc_OverflowError,
289 290 "revlog entry size out of bound (%d)",
290 291 tmp);
291 292 return -1;
292 293 }
293 294 return tmp;
294 295 }
295 296 }
296 297
297 298 /*
298 299 * RevlogNG format (all in big endian, data may be inlined):
299 300 * 6 bytes: offset
300 301 * 2 bytes: flags
301 302 * 4 bytes: compressed length
302 303 * 4 bytes: uncompressed length
303 304 * 4 bytes: base revision
304 305 * 4 bytes: link revision
305 306 * 4 bytes: parent 1 revision
306 307 * 4 bytes: parent 2 revision
307 308 * 32 bytes: nodeid (only 20 bytes used)
308 309 */
309 310 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
310 311 {
311 312 uint64_t offset_flags;
312 313 int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
313 314 const char *c_node_id;
314 315 const char *data;
315 316 Py_ssize_t length = index_length(self);
316 317 PyObject *entry;
317 318
318 319 if (pos == nullrev) {
319 320 Py_INCREF(nullentry);
320 321 return nullentry;
321 322 }
322 323
323 324 if (pos < 0 || pos >= length) {
324 325 PyErr_SetString(PyExc_IndexError, "revlog index out of range");
325 326 return NULL;
326 327 }
327 328
328 329 if (pos >= self->length) {
329 330 PyObject *obj;
330 331 obj = PyList_GET_ITEM(self->added, pos - self->length);
331 332 Py_INCREF(obj);
332 333 return obj;
333 334 }
334 335
335 336 if (self->cache) {
336 337 if (self->cache[pos]) {
337 338 Py_INCREF(self->cache[pos]);
338 339 return self->cache[pos];
339 340 }
340 341 } else {
341 342 self->cache = calloc(self->raw_length, sizeof(PyObject *));
342 343 if (self->cache == NULL)
343 344 return PyErr_NoMemory();
344 345 }
345 346
346 347 data = index_deref(self, pos);
347 348 if (data == NULL)
348 349 return NULL;
349 350
350 351 offset_flags = getbe32(data + 4);
351 352 if (pos == 0) /* mask out version number for the first entry */
352 353 offset_flags &= 0xFFFF;
353 354 else {
354 355 uint32_t offset_high = getbe32(data);
355 356 offset_flags |= ((uint64_t)offset_high) << 32;
356 357 }
357 358
358 359 comp_len = getbe32(data + 8);
359 360 uncomp_len = getbe32(data + 12);
360 361 base_rev = getbe32(data + 16);
361 362 link_rev = getbe32(data + 20);
362 363 parent_1 = getbe32(data + 24);
363 364 parent_2 = getbe32(data + 28);
364 365 c_node_id = data + 32;
365 366
366 367 entry = Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
367 368 base_rev, link_rev, parent_1, parent_2, c_node_id,
368 369 20);
369 370
370 371 if (entry) {
371 372 PyObject_GC_UnTrack(entry);
372 373 Py_INCREF(entry);
373 374 }
374 375
375 376 self->cache[pos] = entry;
376 377
377 378 return entry;
378 379 }
379 380
380 381 /*
381 382 * Return the 20-byte SHA of the node corresponding to the given rev.
382 383 */
383 384 static const char *index_node(indexObject *self, Py_ssize_t pos)
384 385 {
385 386 Py_ssize_t length = index_length(self);
386 387 const char *data;
387 388
388 389 if (pos == nullrev)
389 390 return nullid;
390 391
391 392 if (pos >= length)
392 393 return NULL;
393 394
394 395 if (pos >= self->length) {
395 396 PyObject *tuple, *str;
396 397 tuple = PyList_GET_ITEM(self->added, pos - self->length);
397 398 str = PyTuple_GetItem(tuple, 7);
398 399 return str ? PyBytes_AS_STRING(str) : NULL;
399 400 }
400 401
401 402 data = index_deref(self, pos);
402 403 return data ? data + 32 : NULL;
403 404 }
404 405
405 406 /*
406 407 * Return the 20-byte SHA of the node corresponding to the given rev. The
407 408 * rev is assumed to be existing. If not, an exception is set.
408 409 */
409 410 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
410 411 {
411 412 const char *node = index_node(self, pos);
412 413 if (node == NULL) {
413 414 PyErr_Format(PyExc_IndexError, "could not access rev %d",
414 415 (int)pos);
415 416 }
416 417 return node;
417 418 }
418 419
419 420 static int nt_insert(nodetree *self, const char *node, int rev);
420 421
421 422 static int node_check(PyObject *obj, char **node)
422 423 {
423 424 Py_ssize_t nodelen;
424 425 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
425 426 return -1;
426 427 if (nodelen == 20)
427 428 return 0;
428 429 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
429 430 return -1;
430 431 }
431 432
432 433 static PyObject *index_append(indexObject *self, PyObject *obj)
433 434 {
434 435 char *node;
435 436 Py_ssize_t len;
436 437
437 438 if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
438 439 PyErr_SetString(PyExc_TypeError, "8-tuple required");
439 440 return NULL;
440 441 }
441 442
442 443 if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
443 444 return NULL;
444 445
445 446 len = index_length(self);
446 447
447 448 if (self->added == NULL) {
448 449 self->added = PyList_New(0);
449 450 if (self->added == NULL)
450 451 return NULL;
451 452 }
452 453
453 454 if (PyList_Append(self->added, obj) == -1)
454 455 return NULL;
455 456
456 457 if (self->ntinitialized)
457 458 nt_insert(&self->nt, node, (int)len);
458 459
459 460 Py_CLEAR(self->headrevs);
460 461 Py_RETURN_NONE;
461 462 }
462 463
463 464 static PyObject *index_stats(indexObject *self)
464 465 {
465 466 PyObject *obj = PyDict_New();
466 467 PyObject *s = NULL;
467 468 PyObject *t = NULL;
468 469
469 470 if (obj == NULL)
470 471 return NULL;
471 472
472 473 #define istat(__n, __d) \
473 474 do { \
474 475 s = PyBytes_FromString(__d); \
475 476 t = PyInt_FromSsize_t(self->__n); \
476 477 if (!s || !t) \
477 478 goto bail; \
478 479 if (PyDict_SetItem(obj, s, t) == -1) \
479 480 goto bail; \
480 481 Py_CLEAR(s); \
481 482 Py_CLEAR(t); \
482 483 } while (0)
483 484
484 485 if (self->added) {
485 486 Py_ssize_t len = PyList_GET_SIZE(self->added);
486 487 s = PyBytes_FromString("index entries added");
487 488 t = PyInt_FromSsize_t(len);
488 489 if (!s || !t)
489 490 goto bail;
490 491 if (PyDict_SetItem(obj, s, t) == -1)
491 492 goto bail;
492 493 Py_CLEAR(s);
493 494 Py_CLEAR(t);
494 495 }
495 496
496 497 if (self->raw_length != self->length)
497 498 istat(raw_length, "revs on disk");
498 499 istat(length, "revs in memory");
499 500 istat(ntlookups, "node trie lookups");
500 501 istat(ntmisses, "node trie misses");
501 502 istat(ntrev, "node trie last rev scanned");
502 503 if (self->ntinitialized) {
503 504 istat(nt.capacity, "node trie capacity");
504 505 istat(nt.depth, "node trie depth");
505 506 istat(nt.length, "node trie count");
506 507 istat(nt.splits, "node trie splits");
507 508 }
508 509
509 510 #undef istat
510 511
511 512 return obj;
512 513
513 514 bail:
514 515 Py_XDECREF(obj);
515 516 Py_XDECREF(s);
516 517 Py_XDECREF(t);
517 518 return NULL;
518 519 }
519 520
520 521 /*
521 522 * When we cache a list, we want to be sure the caller can't mutate
522 523 * the cached copy.
523 524 */
524 525 static PyObject *list_copy(PyObject *list)
525 526 {
526 527 Py_ssize_t len = PyList_GET_SIZE(list);
527 528 PyObject *newlist = PyList_New(len);
528 529 Py_ssize_t i;
529 530
530 531 if (newlist == NULL)
531 532 return NULL;
532 533
533 534 for (i = 0; i < len; i++) {
534 535 PyObject *obj = PyList_GET_ITEM(list, i);
535 536 Py_INCREF(obj);
536 537 PyList_SET_ITEM(newlist, i, obj);
537 538 }
538 539
539 540 return newlist;
540 541 }
541 542
542 543 static int check_filter(PyObject *filter, Py_ssize_t arg)
543 544 {
544 545 if (filter) {
545 546 PyObject *arglist, *result;
546 547 int isfiltered;
547 548
548 549 arglist = Py_BuildValue("(n)", arg);
549 550 if (!arglist) {
550 551 return -1;
551 552 }
552 553
553 554 result = PyEval_CallObject(filter, arglist);
554 555 Py_DECREF(arglist);
555 556 if (!result) {
556 557 return -1;
557 558 }
558 559
559 560 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
560 561 * same as this function, so we can just return it directly.*/
561 562 isfiltered = PyObject_IsTrue(result);
562 563 Py_DECREF(result);
563 564 return isfiltered;
564 565 } else {
565 566 return 0;
566 567 }
567 568 }
568 569
569 570 static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
570 571 Py_ssize_t marker, char *phases)
571 572 {
572 573 PyObject *iter = NULL;
573 574 PyObject *iter_item = NULL;
574 575 Py_ssize_t min_idx = index_length(self) + 2;
575 576 long iter_item_long;
576 577
577 578 if (PyList_GET_SIZE(list) != 0) {
578 579 iter = PyObject_GetIter(list);
579 580 if (iter == NULL)
580 581 return -2;
581 582 while ((iter_item = PyIter_Next(iter))) {
582 583 if (!pylong_to_long(iter_item, &iter_item_long)) {
583 584 Py_DECREF(iter_item);
584 585 return -2;
585 586 }
586 587 Py_DECREF(iter_item);
587 588 if (iter_item_long < min_idx)
588 589 min_idx = iter_item_long;
589 590 phases[iter_item_long] = (char)marker;
590 591 }
591 592 Py_DECREF(iter);
592 593 }
593 594
594 595 return min_idx;
595 596 }
596 597
597 598 static inline void set_phase_from_parents(char *phases, int parent_1,
598 599 int parent_2, Py_ssize_t i)
599 600 {
600 601 if (parent_1 >= 0 && phases[parent_1] > phases[i])
601 602 phases[i] = phases[parent_1];
602 603 if (parent_2 >= 0 && phases[parent_2] > phases[i])
603 604 phases[i] = phases[parent_2];
604 605 }
605 606
606 607 static PyObject *reachableroots2(indexObject *self, PyObject *args)
607 608 {
608 609
609 610 /* Input */
610 611 long minroot;
611 612 PyObject *includepatharg = NULL;
612 613 int includepath = 0;
613 614 /* heads and roots are lists */
614 615 PyObject *heads = NULL;
615 616 PyObject *roots = NULL;
616 617 PyObject *reachable = NULL;
617 618
618 619 PyObject *val;
619 620 Py_ssize_t len = index_length(self);
620 621 long revnum;
621 622 Py_ssize_t k;
622 623 Py_ssize_t i;
623 624 Py_ssize_t l;
624 625 int r;
625 626 int parents[2];
626 627
627 628 /* Internal data structure:
628 629 * tovisit: array of length len+1 (all revs + nullrev), filled upto
629 630 * lentovisit
630 631 *
631 632 * revstates: array of length len+1 (all revs + nullrev) */
632 633 int *tovisit = NULL;
633 634 long lentovisit = 0;
634 635 enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
635 636 char *revstates = NULL;
636 637
637 638 /* Get arguments */
638 639 if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
639 640 &PyList_Type, &roots, &PyBool_Type,
640 641 &includepatharg))
641 642 goto bail;
642 643
643 644 if (includepatharg == Py_True)
644 645 includepath = 1;
645 646
646 647 /* Initialize return set */
647 648 reachable = PyList_New(0);
648 649 if (reachable == NULL)
649 650 goto bail;
650 651
651 652 /* Initialize internal datastructures */
652 653 tovisit = (int *)malloc((len + 1) * sizeof(int));
653 654 if (tovisit == NULL) {
654 655 PyErr_NoMemory();
655 656 goto bail;
656 657 }
657 658
658 659 revstates = (char *)calloc(len + 1, 1);
659 660 if (revstates == NULL) {
660 661 PyErr_NoMemory();
661 662 goto bail;
662 663 }
663 664
664 665 l = PyList_GET_SIZE(roots);
665 666 for (i = 0; i < l; i++) {
666 667 revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
667 668 if (revnum == -1 && PyErr_Occurred())
668 669 goto bail;
669 670 /* If root is out of range, e.g. wdir(), it must be unreachable
670 671 * from heads. So we can just ignore it. */
671 672 if (revnum + 1 < 0 || revnum + 1 >= len + 1)
672 673 continue;
673 674 revstates[revnum + 1] |= RS_ROOT;
674 675 }
675 676
676 677 /* Populate tovisit with all the heads */
677 678 l = PyList_GET_SIZE(heads);
678 679 for (i = 0; i < l; i++) {
679 680 revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
680 681 if (revnum == -1 && PyErr_Occurred())
681 682 goto bail;
682 683 if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
683 684 PyErr_SetString(PyExc_IndexError, "head out of range");
684 685 goto bail;
685 686 }
686 687 if (!(revstates[revnum + 1] & RS_SEEN)) {
687 688 tovisit[lentovisit++] = (int)revnum;
688 689 revstates[revnum + 1] |= RS_SEEN;
689 690 }
690 691 }
691 692
692 693 /* Visit the tovisit list and find the reachable roots */
693 694 k = 0;
694 695 while (k < lentovisit) {
695 696 /* Add the node to reachable if it is a root*/
696 697 revnum = tovisit[k++];
697 698 if (revstates[revnum + 1] & RS_ROOT) {
698 699 revstates[revnum + 1] |= RS_REACHABLE;
699 700 val = PyInt_FromLong(revnum);
700 701 if (val == NULL)
701 702 goto bail;
702 703 r = PyList_Append(reachable, val);
703 704 Py_DECREF(val);
704 705 if (r < 0)
705 706 goto bail;
706 707 if (includepath == 0)
707 708 continue;
708 709 }
709 710
710 711 /* Add its parents to the list of nodes to visit */
711 712 if (revnum == nullrev)
712 713 continue;
713 714 r = index_get_parents(self, revnum, parents, (int)len - 1);
714 715 if (r < 0)
715 716 goto bail;
716 717 for (i = 0; i < 2; i++) {
717 718 if (!(revstates[parents[i] + 1] & RS_SEEN) &&
718 719 parents[i] >= minroot) {
719 720 tovisit[lentovisit++] = parents[i];
720 721 revstates[parents[i] + 1] |= RS_SEEN;
721 722 }
722 723 }
723 724 }
724 725
725 726 /* Find all the nodes in between the roots we found and the heads
726 727 * and add them to the reachable set */
727 728 if (includepath == 1) {
728 729 long minidx = minroot;
729 730 if (minidx < 0)
730 731 minidx = 0;
731 732 for (i = minidx; i < len; i++) {
732 733 if (!(revstates[i + 1] & RS_SEEN))
733 734 continue;
734 735 r = index_get_parents(self, i, parents, (int)len - 1);
735 736 /* Corrupted index file, error is set from
736 737 * index_get_parents */
737 738 if (r < 0)
738 739 goto bail;
739 740 if (((revstates[parents[0] + 1] |
740 741 revstates[parents[1] + 1]) &
741 742 RS_REACHABLE) &&
742 743 !(revstates[i + 1] & RS_REACHABLE)) {
743 744 revstates[i + 1] |= RS_REACHABLE;
744 745 val = PyInt_FromSsize_t(i);
745 746 if (val == NULL)
746 747 goto bail;
747 748 r = PyList_Append(reachable, val);
748 749 Py_DECREF(val);
749 750 if (r < 0)
750 751 goto bail;
751 752 }
752 753 }
753 754 }
754 755
755 756 free(revstates);
756 757 free(tovisit);
757 758 return reachable;
758 759 bail:
759 760 Py_XDECREF(reachable);
760 761 free(revstates);
761 762 free(tovisit);
762 763 return NULL;
763 764 }
764 765
765 766 static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
766 767 {
767 768 PyObject *roots = Py_None;
768 769 PyObject *ret = NULL;
769 770 PyObject *phasessize = NULL;
770 771 PyObject *phaseroots = NULL;
771 772 PyObject *phaseset = NULL;
772 773 PyObject *phasessetlist = NULL;
773 774 PyObject *rev = NULL;
774 775 Py_ssize_t len = index_length(self);
775 776 Py_ssize_t numphase = 0;
776 777 Py_ssize_t minrevallphases = 0;
777 778 Py_ssize_t minrevphase = 0;
778 779 Py_ssize_t i = 0;
779 780 char *phases = NULL;
780 781 long phase;
781 782
782 783 if (!PyArg_ParseTuple(args, "O", &roots))
783 784 goto done;
784 785 if (roots == NULL || !PyList_Check(roots)) {
785 786 PyErr_SetString(PyExc_TypeError, "roots must be a list");
786 787 goto done;
787 788 }
788 789
789 790 phases = calloc(
790 791 len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
791 792 if (phases == NULL) {
792 793 PyErr_NoMemory();
793 794 goto done;
794 795 }
795 796 /* Put the phase information of all the roots in phases */
796 797 numphase = PyList_GET_SIZE(roots) + 1;
797 798 minrevallphases = len + 1;
798 799 phasessetlist = PyList_New(numphase);
799 800 if (phasessetlist == NULL)
800 801 goto done;
801 802
802 803 PyList_SET_ITEM(phasessetlist, 0, Py_None);
803 804 Py_INCREF(Py_None);
804 805
805 806 for (i = 0; i < numphase - 1; i++) {
806 807 phaseroots = PyList_GET_ITEM(roots, i);
807 808 phaseset = PySet_New(NULL);
808 809 if (phaseset == NULL)
809 810 goto release;
810 811 PyList_SET_ITEM(phasessetlist, i + 1, phaseset);
811 812 if (!PyList_Check(phaseroots)) {
812 813 PyErr_SetString(PyExc_TypeError,
813 814 "roots item must be a list");
814 815 goto release;
815 816 }
816 817 minrevphase =
817 818 add_roots_get_min(self, phaseroots, i + 1, phases);
818 819 if (minrevphase == -2) /* Error from add_roots_get_min */
819 820 goto release;
820 821 minrevallphases = MIN(minrevallphases, minrevphase);
821 822 }
822 823 /* Propagate the phase information from the roots to the revs */
823 824 if (minrevallphases != -1) {
824 825 int parents[2];
825 826 for (i = minrevallphases; i < len; i++) {
826 827 if (index_get_parents(self, i, parents, (int)len - 1) <
827 828 0)
828 829 goto release;
829 830 set_phase_from_parents(phases, parents[0], parents[1],
830 831 i);
831 832 }
832 833 }
833 834 /* Transform phase list to a python list */
834 835 phasessize = PyInt_FromSsize_t(len);
835 836 if (phasessize == NULL)
836 837 goto release;
837 838 for (i = 0; i < len; i++) {
838 839 phase = phases[i];
839 840 /* We only store the sets of phase for non public phase, the
840 841 * public phase is computed as a difference */
841 842 if (phase != 0) {
842 843 phaseset = PyList_GET_ITEM(phasessetlist, phase);
843 844 rev = PyInt_FromSsize_t(i);
844 845 if (rev == NULL)
845 846 goto release;
846 847 PySet_Add(phaseset, rev);
847 848 Py_XDECREF(rev);
848 849 }
849 850 }
850 851 ret = PyTuple_Pack(2, phasessize, phasessetlist);
851 852
852 853 release:
853 854 Py_XDECREF(phasessize);
854 855 Py_XDECREF(phasessetlist);
855 856 done:
856 857 free(phases);
857 858 return ret;
858 859 }
859 860
860 861 static PyObject *index_headrevs(indexObject *self, PyObject *args)
861 862 {
862 863 Py_ssize_t i, j, len;
863 864 char *nothead = NULL;
864 865 PyObject *heads = NULL;
865 866 PyObject *filter = NULL;
866 867 PyObject *filteredrevs = Py_None;
867 868
868 869 if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
869 870 return NULL;
870 871 }
871 872
872 873 if (self->headrevs && filteredrevs == self->filteredrevs)
873 874 return list_copy(self->headrevs);
874 875
875 876 Py_DECREF(self->filteredrevs);
876 877 self->filteredrevs = filteredrevs;
877 878 Py_INCREF(filteredrevs);
878 879
879 880 if (filteredrevs != Py_None) {
880 881 filter = PyObject_GetAttrString(filteredrevs, "__contains__");
881 882 if (!filter) {
882 883 PyErr_SetString(
883 884 PyExc_TypeError,
884 885 "filteredrevs has no attribute __contains__");
885 886 goto bail;
886 887 }
887 888 }
888 889
889 890 len = index_length(self);
890 891 heads = PyList_New(0);
891 892 if (heads == NULL)
892 893 goto bail;
893 894 if (len == 0) {
894 895 PyObject *nullid = PyInt_FromLong(-1);
895 896 if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
896 897 Py_XDECREF(nullid);
897 898 goto bail;
898 899 }
899 900 goto done;
900 901 }
901 902
902 903 nothead = calloc(len, 1);
903 904 if (nothead == NULL) {
904 905 PyErr_NoMemory();
905 906 goto bail;
906 907 }
907 908
908 909 for (i = len - 1; i >= 0; i--) {
909 910 int isfiltered;
910 911 int parents[2];
911 912
912 913 /* If nothead[i] == 1, it means we've seen an unfiltered child
913 914 * of this node already, and therefore this node is not
914 915 * filtered. So we can skip the expensive check_filter step.
915 916 */
916 917 if (nothead[i] != 1) {
917 918 isfiltered = check_filter(filter, i);
918 919 if (isfiltered == -1) {
919 920 PyErr_SetString(PyExc_TypeError,
920 921 "unable to check filter");
921 922 goto bail;
922 923 }
923 924
924 925 if (isfiltered) {
925 926 nothead[i] = 1;
926 927 continue;
927 928 }
928 929 }
929 930
930 931 if (index_get_parents(self, i, parents, (int)len - 1) < 0)
931 932 goto bail;
932 933 for (j = 0; j < 2; j++) {
933 934 if (parents[j] >= 0)
934 935 nothead[parents[j]] = 1;
935 936 }
936 937 }
937 938
938 939 for (i = 0; i < len; i++) {
939 940 PyObject *head;
940 941
941 942 if (nothead[i])
942 943 continue;
943 944 head = PyInt_FromSsize_t(i);
944 945 if (head == NULL || PyList_Append(heads, head) == -1) {
945 946 Py_XDECREF(head);
946 947 goto bail;
947 948 }
948 949 }
949 950
950 951 done:
951 952 self->headrevs = heads;
952 953 Py_XDECREF(filter);
953 954 free(nothead);
954 955 return list_copy(self->headrevs);
955 956 bail:
956 957 Py_XDECREF(filter);
957 958 Py_XDECREF(heads);
958 959 free(nothead);
959 960 return NULL;
960 961 }
961 962
962 963 /**
963 964 * Obtain the base revision index entry.
964 965 *
965 966 * Callers must ensure that rev >= 0 or illegal memory access may occur.
966 967 */
967 968 static inline int index_baserev(indexObject *self, int rev)
968 969 {
969 970 const char *data;
970 971 int result;
971 972
972 973 if (rev >= self->length) {
973 974 PyObject *tuple =
974 975 PyList_GET_ITEM(self->added, rev - self->length);
975 976 long ret;
976 977 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 3), &ret)) {
977 978 return -2;
978 979 }
979 980 result = (int)ret;
980 981 } else {
981 982 data = index_deref(self, rev);
982 983 if (data == NULL) {
983 984 return -2;
984 985 }
985 986
986 987 result = getbe32(data + 16);
987 988 }
988 989 if (result > rev) {
989 990 PyErr_Format(
990 991 PyExc_ValueError,
991 992 "corrupted revlog, revision base above revision: %d, %d",
992 993 rev, result);
993 994 return -2;
994 995 }
995 996 if (result < -1) {
996 997 PyErr_Format(
997 998 PyExc_ValueError,
998 999 "corrupted revlog, revision base out of range: %d, %d", rev,
999 1000 result);
1000 1001 return -2;
1001 1002 }
1002 1003 return result;
1003 1004 }
1004 1005
1005 1006 /**
1006 1007 * Find if a revision is a snapshot or not
1007 1008 *
1008 1009 * Only relevant for sparse-revlog case.
1009 1010 * Callers must ensure that rev is in a valid range.
1010 1011 */
1011 1012 static int index_issnapshotrev(indexObject *self, Py_ssize_t rev)
1012 1013 {
1013 1014 int ps[2];
1014 1015 Py_ssize_t base;
1015 1016 while (rev >= 0) {
1016 1017 base = (Py_ssize_t)index_baserev(self, rev);
1017 1018 if (base == rev) {
1018 1019 base = -1;
1019 1020 }
1020 1021 if (base == -2) {
1021 1022 assert(PyErr_Occurred());
1022 1023 return -1;
1023 1024 }
1024 1025 if (base == -1) {
1025 1026 return 1;
1026 1027 }
1027 1028 if (index_get_parents(self, rev, ps, (int)rev) < 0) {
1028 1029 assert(PyErr_Occurred());
1029 1030 return -1;
1030 1031 };
1031 1032 if (base == ps[0] || base == ps[1]) {
1032 1033 return 0;
1033 1034 }
1034 1035 rev = base;
1035 1036 }
1036 1037 return rev == -1;
1037 1038 }
1038 1039
1039 1040 static PyObject *index_issnapshot(indexObject *self, PyObject *value)
1040 1041 {
1041 1042 long rev;
1042 1043 int issnap;
1043 1044 Py_ssize_t length = index_length(self);
1044 1045
1045 1046 if (!pylong_to_long(value, &rev)) {
1046 1047 return NULL;
1047 1048 }
1048 1049 if (rev < -1 || rev >= length) {
1049 1050 PyErr_Format(PyExc_ValueError, "revlog index out of range: %ld",
1050 1051 rev);
1051 1052 return NULL;
1052 1053 };
1053 1054 issnap = index_issnapshotrev(self, (Py_ssize_t)rev);
1054 1055 if (issnap < 0) {
1055 1056 return NULL;
1056 1057 };
1057 1058 return PyBool_FromLong((long)issnap);
1058 1059 }
1059 1060
1060 1061 static PyObject *index_findsnapshots(indexObject *self, PyObject *args)
1061 1062 {
1062 1063 Py_ssize_t start_rev;
1063 1064 PyObject *cache;
1064 1065 Py_ssize_t base;
1065 1066 Py_ssize_t rev;
1066 1067 PyObject *key = NULL;
1067 1068 PyObject *value = NULL;
1068 1069 const Py_ssize_t length = index_length(self);
1069 1070 if (!PyArg_ParseTuple(args, "O!n", &PyDict_Type, &cache, &start_rev)) {
1070 1071 return NULL;
1071 1072 }
1072 1073 for (rev = start_rev; rev < length; rev++) {
1073 1074 int issnap;
1074 1075 PyObject *allvalues = NULL;
1075 1076 issnap = index_issnapshotrev(self, rev);
1076 1077 if (issnap < 0) {
1077 1078 goto bail;
1078 1079 }
1079 1080 if (issnap == 0) {
1080 1081 continue;
1081 1082 }
1082 1083 base = (Py_ssize_t)index_baserev(self, rev);
1083 1084 if (base == rev) {
1084 1085 base = -1;
1085 1086 }
1086 1087 if (base == -2) {
1087 1088 assert(PyErr_Occurred());
1088 1089 goto bail;
1089 1090 }
1090 1091 key = PyInt_FromSsize_t(base);
1091 1092 allvalues = PyDict_GetItem(cache, key);
1092 1093 if (allvalues == NULL && PyErr_Occurred()) {
1093 1094 goto bail;
1094 1095 }
1095 1096 if (allvalues == NULL) {
1096 1097 int r;
1097 1098 allvalues = PyList_New(0);
1098 1099 if (!allvalues) {
1099 1100 goto bail;
1100 1101 }
1101 1102 r = PyDict_SetItem(cache, key, allvalues);
1102 1103 Py_DECREF(allvalues);
1103 1104 if (r < 0) {
1104 1105 goto bail;
1105 1106 }
1106 1107 }
1107 1108 value = PyInt_FromSsize_t(rev);
1108 1109 if (PyList_Append(allvalues, value)) {
1109 1110 goto bail;
1110 1111 }
1111 1112 Py_CLEAR(key);
1112 1113 Py_CLEAR(value);
1113 1114 }
1114 1115 Py_RETURN_NONE;
1115 1116 bail:
1116 1117 Py_XDECREF(key);
1117 1118 Py_XDECREF(value);
1118 1119 return NULL;
1119 1120 }
1120 1121
1121 1122 static PyObject *index_deltachain(indexObject *self, PyObject *args)
1122 1123 {
1123 1124 int rev, generaldelta;
1124 1125 PyObject *stoparg;
1125 1126 int stoprev, iterrev, baserev = -1;
1126 1127 int stopped;
1127 1128 PyObject *chain = NULL, *result = NULL;
1128 1129 const Py_ssize_t length = index_length(self);
1129 1130
1130 1131 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
1131 1132 return NULL;
1132 1133 }
1133 1134
1134 1135 if (PyInt_Check(stoparg)) {
1135 1136 stoprev = (int)PyInt_AsLong(stoparg);
1136 1137 if (stoprev == -1 && PyErr_Occurred()) {
1137 1138 return NULL;
1138 1139 }
1139 1140 } else if (stoparg == Py_None) {
1140 1141 stoprev = -2;
1141 1142 } else {
1142 1143 PyErr_SetString(PyExc_ValueError,
1143 1144 "stoprev must be integer or None");
1144 1145 return NULL;
1145 1146 }
1146 1147
1147 1148 if (rev < 0 || rev >= length) {
1148 1149 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1149 1150 return NULL;
1150 1151 }
1151 1152
1152 1153 chain = PyList_New(0);
1153 1154 if (chain == NULL) {
1154 1155 return NULL;
1155 1156 }
1156 1157
1157 1158 baserev = index_baserev(self, rev);
1158 1159
1159 1160 /* This should never happen. */
1160 1161 if (baserev <= -2) {
1161 1162 /* Error should be set by index_deref() */
1162 1163 assert(PyErr_Occurred());
1163 1164 goto bail;
1164 1165 }
1165 1166
1166 1167 iterrev = rev;
1167 1168
1168 1169 while (iterrev != baserev && iterrev != stoprev) {
1169 1170 PyObject *value = PyInt_FromLong(iterrev);
1170 1171 if (value == NULL) {
1171 1172 goto bail;
1172 1173 }
1173 1174 if (PyList_Append(chain, value)) {
1174 1175 Py_DECREF(value);
1175 1176 goto bail;
1176 1177 }
1177 1178 Py_DECREF(value);
1178 1179
1179 1180 if (generaldelta) {
1180 1181 iterrev = baserev;
1181 1182 } else {
1182 1183 iterrev--;
1183 1184 }
1184 1185
1185 1186 if (iterrev < 0) {
1186 1187 break;
1187 1188 }
1188 1189
1189 1190 if (iterrev >= length) {
1190 1191 PyErr_SetString(PyExc_IndexError,
1191 1192 "revision outside index");
1192 1193 return NULL;
1193 1194 }
1194 1195
1195 1196 baserev = index_baserev(self, iterrev);
1196 1197
1197 1198 /* This should never happen. */
1198 1199 if (baserev <= -2) {
1199 1200 /* Error should be set by index_deref() */
1200 1201 assert(PyErr_Occurred());
1201 1202 goto bail;
1202 1203 }
1203 1204 }
1204 1205
1205 1206 if (iterrev == stoprev) {
1206 1207 stopped = 1;
1207 1208 } else {
1208 1209 PyObject *value = PyInt_FromLong(iterrev);
1209 1210 if (value == NULL) {
1210 1211 goto bail;
1211 1212 }
1212 1213 if (PyList_Append(chain, value)) {
1213 1214 Py_DECREF(value);
1214 1215 goto bail;
1215 1216 }
1216 1217 Py_DECREF(value);
1217 1218
1218 1219 stopped = 0;
1219 1220 }
1220 1221
1221 1222 if (PyList_Reverse(chain)) {
1222 1223 goto bail;
1223 1224 }
1224 1225
1225 1226 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
1226 1227 Py_DECREF(chain);
1227 1228 return result;
1228 1229
1229 1230 bail:
1230 1231 Py_DECREF(chain);
1231 1232 return NULL;
1232 1233 }
1233 1234
1234 1235 static inline int64_t
1235 1236 index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
1236 1237 {
1237 1238 int64_t start_offset;
1238 1239 int64_t end_offset;
1239 1240 int end_size;
1240 1241 start_offset = index_get_start(self, start_rev);
1241 1242 if (start_offset < 0) {
1242 1243 return -1;
1243 1244 }
1244 1245 end_offset = index_get_start(self, end_rev);
1245 1246 if (end_offset < 0) {
1246 1247 return -1;
1247 1248 }
1248 1249 end_size = index_get_length(self, end_rev);
1249 1250 if (end_size < 0) {
1250 1251 return -1;
1251 1252 }
1252 1253 if (end_offset < start_offset) {
1253 1254 PyErr_Format(PyExc_ValueError,
1254 1255 "corrupted revlog index: inconsistent offset "
1255 1256 "between revisions (%zd) and (%zd)",
1256 1257 start_rev, end_rev);
1257 1258 return -1;
1258 1259 }
1259 1260 return (end_offset - start_offset) + (int64_t)end_size;
1260 1261 }
1261 1262
1262 1263 /* returns endidx so that revs[startidx:endidx] has no empty trailing revs */
1263 1264 static Py_ssize_t trim_endidx(indexObject *self, const Py_ssize_t *revs,
1264 1265 Py_ssize_t startidx, Py_ssize_t endidx)
1265 1266 {
1266 1267 int length;
1267 1268 while (endidx > 1 && endidx > startidx) {
1268 1269 length = index_get_length(self, revs[endidx - 1]);
1269 1270 if (length < 0) {
1270 1271 return -1;
1271 1272 }
1272 1273 if (length != 0) {
1273 1274 break;
1274 1275 }
1275 1276 endidx -= 1;
1276 1277 }
1277 1278 return endidx;
1278 1279 }
1279 1280
1280 1281 struct Gap {
1281 1282 int64_t size;
1282 1283 Py_ssize_t idx;
1283 1284 };
1284 1285
1285 1286 static int gap_compare(const void *left, const void *right)
1286 1287 {
1287 1288 const struct Gap *l_left = ((const struct Gap *)left);
1288 1289 const struct Gap *l_right = ((const struct Gap *)right);
1289 1290 if (l_left->size < l_right->size) {
1290 1291 return -1;
1291 1292 } else if (l_left->size > l_right->size) {
1292 1293 return 1;
1293 1294 }
1294 1295 return 0;
1295 1296 }
1296 1297 static int Py_ssize_t_compare(const void *left, const void *right)
1297 1298 {
1298 1299 const Py_ssize_t l_left = *(const Py_ssize_t *)left;
1299 1300 const Py_ssize_t l_right = *(const Py_ssize_t *)right;
1300 1301 if (l_left < l_right) {
1301 1302 return -1;
1302 1303 } else if (l_left > l_right) {
1303 1304 return 1;
1304 1305 }
1305 1306 return 0;
1306 1307 }
1307 1308
/* slicechunktodensity(revs, targetdensity, mingapsize) -> list of rev lists
 *
 * Slice the delta chain `revs` (a Python list of revision numbers) into
 * chunks so that reading each chunk reaches a payload/read-bytes density
 * of at least `targetdensity`, skipping over gaps larger than
 * `mingapsize`. Returns the input unsliced (as a 1-tuple) when no slicing
 * is needed. */
static PyObject *index_slicechunktodensity(indexObject *self, PyObject *args)
{
	/* method arguments */
	PyObject *list_revs = NULL; /* revisions in the chain */
	double targetdensity = 0;   /* min density to achieve */
	Py_ssize_t mingapsize = 0;  /* threshold to ignore gaps */

	/* other core variables */
	Py_ssize_t idxlen = index_length(self);
	Py_ssize_t i;            /* used for various iteration */
	PyObject *result = NULL; /* the final return of the function */

	/* generic information about the delta chain being slice */
	Py_ssize_t num_revs = 0;    /* size of the full delta chain */
	Py_ssize_t *revs = NULL;    /* native array of revision in the chain */
	int64_t chainpayload = 0;   /* sum of all delta in the chain */
	int64_t deltachainspan = 0; /* distance from first byte to last byte */

	/* variable used for slicing the delta chain */
	int64_t readdata = 0; /* amount of data currently planned to be read */
	double density = 0;   /* ration of payload data compared to read ones */
	int64_t previous_end;
	struct Gap *gaps = NULL; /* array of notable gap in the chain */
	Py_ssize_t num_gaps =
	    0; /* total number of notable gap recorded so far */
	Py_ssize_t *selected_indices = NULL; /* indices of gap skipped over */
	Py_ssize_t num_selected = 0;         /* number of gaps skipped */
	PyObject *chunk = NULL;              /* individual slice */
	PyObject *allchunks = NULL;          /* all slices */
	Py_ssize_t previdx;

	/* parsing argument */
	if (!PyArg_ParseTuple(args, "O!dn", &PyList_Type, &list_revs,
	                      &targetdensity, &mingapsize)) {
		goto bail;
	}

	/* If the delta chain contains a single element, we do not need slicing
	 */
	num_revs = PyList_GET_SIZE(list_revs);
	if (num_revs <= 1) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* Turn the python list into a native integer array (for efficiency) */
	revs = (Py_ssize_t *)calloc(num_revs, sizeof(Py_ssize_t));
	if (revs == NULL) {
		PyErr_NoMemory();
		goto bail;
	}
	for (i = 0; i < num_revs; i++) {
		Py_ssize_t revnum = PyInt_AsLong(PyList_GET_ITEM(list_revs, i));
		if (revnum == -1 && PyErr_Occurred()) {
			goto bail;
		}
		if (revnum < nullrev || revnum >= idxlen) {
			PyErr_Format(PyExc_IndexError,
			             "index out of range: %zd", revnum);
			goto bail;
		}
		revs[i] = revnum;
	}

	/* Compute and check various property of the unsliced delta chain */
	deltachainspan = index_segment_span(self, revs[0], revs[num_revs - 1]);
	if (deltachainspan < 0) {
		goto bail;
	}

	/* a span smaller than the gap threshold can never need slicing */
	if (deltachainspan <= mingapsize) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	chainpayload = 0;
	for (i = 0; i < num_revs; i++) {
		int tmp = index_get_length(self, revs[i]);
		if (tmp < 0) {
			goto bail;
		}
		chainpayload += tmp;
	}

	readdata = deltachainspan;
	density = 1.0;

	if (0 < deltachainspan) {
		density = (double)chainpayload / (double)deltachainspan;
	}

	if (density >= targetdensity) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* if chain is too sparse, look for relevant gaps */
	gaps = (struct Gap *)calloc(num_revs, sizeof(struct Gap));
	if (gaps == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	previous_end = -1;
	for (i = 0; i < num_revs; i++) {
		int64_t revstart;
		int revsize;
		revstart = index_get_start(self, revs[i]);
		if (revstart < 0) {
			goto bail;
		};
		revsize = index_get_length(self, revs[i]);
		if (revsize < 0) {
			goto bail;
		};
		/* empty revisions contribute no bytes and create no gap */
		if (revsize == 0) {
			continue;
		}
		if (previous_end >= 0) {
			int64_t gapsize = revstart - previous_end;
			/* only gaps above the threshold are worth skipping */
			if (gapsize > mingapsize) {
				gaps[num_gaps].size = gapsize;
				gaps[num_gaps].idx = i;
				num_gaps += 1;
			}
		}
		previous_end = revstart + revsize;
	}
	if (num_gaps == 0) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	qsort(gaps, num_gaps, sizeof(struct Gap), &gap_compare);

	/* Slice the largest gap first, they improve the density the most */
	selected_indices =
	    (Py_ssize_t *)malloc((num_gaps + 1) * sizeof(Py_ssize_t));
	if (selected_indices == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	for (i = num_gaps - 1; i >= 0; i--) {
		selected_indices[num_selected] = gaps[i].idx;
		readdata -= gaps[i].size;
		num_selected += 1;
		if (readdata <= 0) {
			density = 1.0;
		} else {
			density = (double)chainpayload / (double)readdata;
		}
		if (density >= targetdensity) {
			break;
		}
	}
	/* restore ascending rev order before cutting the list */
	qsort(selected_indices, num_selected, sizeof(Py_ssize_t),
	      &Py_ssize_t_compare);

	/* create the resulting slice */
	allchunks = PyList_New(0);
	if (allchunks == NULL) {
		goto bail;
	}
	previdx = 0;
	/* sentinel so the final slice runs to the end of the chain */
	selected_indices[num_selected] = num_revs;
	for (i = 0; i <= num_selected; i++) {
		Py_ssize_t idx = selected_indices[i];
		Py_ssize_t endidx = trim_endidx(self, revs, previdx, idx);
		if (endidx < 0) {
			goto bail;
		}
		if (previdx < endidx) {
			chunk = PyList_GetSlice(list_revs, previdx, endidx);
			if (chunk == NULL) {
				goto bail;
			}
			if (PyList_Append(allchunks, chunk) == -1) {
				goto bail;
			}
			Py_DECREF(chunk);
			chunk = NULL;
		}
		previdx = idx;
	}
	result = allchunks;
	goto done;

bail:
	Py_XDECREF(allchunks);
	Py_XDECREF(chunk);
done:
	free(revs);
	free(gaps);
	free(selected_indices);
	return result;
}
1503 1504
1504 1505 static inline int nt_level(const char *node, Py_ssize_t level)
1505 1506 {
1506 1507 int v = node[level >> 1];
1507 1508 if (!(level & 1))
1508 1509 v >>= 4;
1509 1510 return v & 0xf;
1510 1511 }
1511 1512
/*
 * Look up `node` (hex if `hex`, otherwise binary, possibly a prefix) in
 * the radix tree.
 *
 * Return values:
 *
 * -4: match is ambiguous (multiple candidates)
 * -2: not found
 * rest: valid rev
 */
static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
                   int hex)
{
	/* pick the nybble extractor matching the input encoding */
	int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
	int level, maxlevel, off;

	/* a full binary nullid always resolves to nullrev */
	if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
		return -1;

	/* 40 nybbles == 20 bytes == a full sha1 node */
	if (hex)
		maxlevel = nodelen > 40 ? 40 : (int)nodelen;
	else
		maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);

	for (level = off = 0; level < maxlevel; level++) {
		int k = getnybble(node, level);
		nodetreenode *n = &self->nodes[off];
		int v = n->children[k];

		if (v < 0) {
			/* negative entry is a leaf: rev encoded as -(rev+2);
			 * verify the remaining nybbles really match */
			const char *n;
			Py_ssize_t i;

			v = -(v + 2);
			n = index_node(self->index, v);
			if (n == NULL)
				return -2;
			for (i = level; i < maxlevel; i++)
				if (getnybble(node, i) != nt_level(n, i))
					return -2;
			return v;
		}
		if (v == 0)
			return -2;
		/* positive entry: index of the next trie node */
		off = v;
	}
	/* multiple matches against an ambiguous prefix */
	return -4;
}
1558 1559
1559 1560 static int nt_new(nodetree *self)
1560 1561 {
1561 1562 if (self->length == self->capacity) {
1562 1563 unsigned newcapacity;
1563 1564 nodetreenode *newnodes;
1564 1565 newcapacity = self->capacity * 2;
1565 1566 if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
1566 1567 PyErr_SetString(PyExc_MemoryError,
1567 1568 "overflow in nt_new");
1568 1569 return -1;
1569 1570 }
1570 1571 newnodes =
1571 1572 realloc(self->nodes, newcapacity * sizeof(nodetreenode));
1572 1573 if (newnodes == NULL) {
1573 1574 PyErr_SetString(PyExc_MemoryError, "out of memory");
1574 1575 return -1;
1575 1576 }
1576 1577 self->capacity = newcapacity;
1577 1578 self->nodes = newnodes;
1578 1579 memset(&self->nodes[self->length], 0,
1579 1580 sizeof(nodetreenode) * (self->capacity - self->length));
1580 1581 }
1581 1582 return self->length++;
1582 1583 }
1583 1584
/* Insert the binary nodeid `node` with revision `rev` into the trie.
 * Returns 0 on success, -1 with a Python exception set on failure.
 * Shares prefixes with existing entries by splitting leaves into interior
 * nodes as needed. */
static int nt_insert(nodetree *self, const char *node, int rev)
{
	int level = 0;
	int off = 0;

	/* 40 nybble levels cover the full 20-byte nodeid */
	while (level < 40) {
		int k = nt_level(node, level);
		nodetreenode *n;
		int v;

		n = &self->nodes[off];
		v = n->children[k];

		if (v == 0) {
			/* empty slot: store the rev as a leaf, -(rev+2) */
			n->children[k] = -rev - 2;
			return 0;
		}
		if (v < 0) {
			/* slot holds another leaf: either the same node
			 * (overwrite) or a colliding prefix (split) */
			const char *oldnode =
			    index_node_existing(self->index, -(v + 2));
			int noff;

			if (oldnode == NULL)
				return -1;
			if (!memcmp(oldnode, node, 20)) {
				n->children[k] = -rev - 2;
				return 0;
			}
			noff = nt_new(self);
			if (noff == -1)
				return -1;
			/* self->nodes may have been changed by realloc */
			self->nodes[off].children[k] = noff;
			off = noff;
			n = &self->nodes[off];
			/* push the old leaf one level down and loop to place
			 * the new node */
			n->children[nt_level(oldnode, ++level)] = v;
			if (level > self->depth)
				self->depth = level;
			self->splits += 1;
		} else {
			/* interior node: descend */
			level += 1;
			off = v;
		}
	}

	/* unreachable for well-formed 20-byte nodes */
	return -1;
}
1631 1632
1632 1633 static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
1633 1634 {
1634 1635 Py_ssize_t rev;
1635 1636 const char *node;
1636 1637 Py_ssize_t length;
1637 1638 if (!PyArg_ParseTuple(args, "n", &rev))
1638 1639 return NULL;
1639 1640 length = index_length(self->nt.index);
1640 1641 if (rev < 0 || rev >= length) {
1641 1642 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1642 1643 return NULL;
1643 1644 }
1644 1645 node = index_node_existing(self->nt.index, rev);
1645 1646 if (nt_insert(&self->nt, node, (int)rev) == -1)
1646 1647 return NULL;
1647 1648 Py_RETURN_NONE;
1648 1649 }
1649 1650
/* Remove `node` from the trie by overwriting its leaf with rev -2.
 * rev==-2 happens to get encoded as 0, which is interpreted as not set
 * by the lookup code. */
static int nt_delete_node(nodetree *self, const char *node)
{
	/* rev==-2 happens to get encoded as 0, which is interpreted as not set
	 */
	return nt_insert(self, node, -2);
}
1656 1657
/* Initialize an embedded nodetree over `index`, pre-sizing the node array
 * for roughly `capacity` revisions. Returns 0, or -1 with an exception
 * set. The caller owns the reference to `index`. */
static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
{
	/* Initialize before overflow-checking to avoid nt_dealloc() crash. */
	self->nodes = NULL;

	self->index = index;
	/* The input capacity is in terms of revisions, while the field is in
	 * terms of nodetree nodes. */
	self->capacity = (capacity < 4 ? 4 : capacity / 2);
	self->depth = 0;
	self->splits = 0;
	if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
		PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
		return -1;
	}
	/* calloc so every child slot starts "not set" (0) */
	self->nodes = calloc(self->capacity, sizeof(nodetreenode));
	if (self->nodes == NULL) {
		PyErr_NoMemory();
		return -1;
	}
	/* node 0 is the root */
	self->length = 1;
	return 0;
}
1680 1681
/* tp_init for parsers.nodetree: nodetree(index, capacity).
 * Takes a reference on `index`; released in ntobj_dealloc (including on
 * nt_init failure, when tp_dealloc runs). */
static int ntobj_init(nodetreeObject *self, PyObject *args)
{
	PyObject *index;
	unsigned capacity;
	if (!PyArg_ParseTuple(args, "O!I", &HgRevlogIndex_Type, &index,
	                      &capacity))
		return -1;
	Py_INCREF(index);
	return nt_init(&self->nt, (indexObject *)index, capacity);
}
1691 1692
/* Resolve a hex nodeid prefix of length `nodelen` to a revision.
 * Same return convention as nt_find (-4 ambiguous, -2 not found). */
static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
{
	return nt_find(self, node, nodelen, 1);
}
1696 1697
/*
 * Find the length of the shortest unique prefix of node.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: length of shortest prefix
 */
static int nt_shortest(nodetree *self, const char *node)
{
	int level, off;

	for (level = off = 0; level < 40; level++) {
		int k, v;
		nodetreenode *n = &self->nodes[off];
		k = nt_level(node, level);
		v = n->children[k];
		if (v < 0) {
			/* leaf: decode the rev and confirm it is our node */
			const char *n;
			v = -(v + 2);
			n = index_node_existing(self->index, v);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, 20) != 0)
				/*
				 * Found a unique prefix, but it wasn't for the
				 * requested node (i.e the requested node does
				 * not exist).
				 */
				return -2;
			/* level nybbles were consumed to get here, so the
			 * prefix needs level+1 hex digits to be unique */
			return level + 1;
		}
		if (v == 0)
			return -2;
		off = v;
	}
	/*
	 * The node was still not unique after 40 hex digits, so this won't
	 * happen. Also, if we get here, then there's a programming error in
	 * this file that made us insert a node longer than 40 hex digits.
	 */
	PyErr_SetString(PyExc_Exception, "broken node tree");
	return -3;
}
1742 1743
1743 1744 static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
1744 1745 {
1745 1746 PyObject *val;
1746 1747 char *node;
1747 1748 int length;
1748 1749
1749 1750 if (!PyArg_ParseTuple(args, "O", &val))
1750 1751 return NULL;
1751 1752 if (node_check(val, &node) == -1)
1752 1753 return NULL;
1753 1754
1754 1755 length = nt_shortest(&self->nt, node);
1755 1756 if (length == -3)
1756 1757 return NULL;
1757 1758 if (length == -2) {
1758 1759 raise_revlog_error();
1759 1760 return NULL;
1760 1761 }
1761 1762 return PyInt_FromLong(length);
1762 1763 }
1763 1764
/* Release the trie's node storage; safe to call repeatedly. */
static void nt_dealloc(nodetree *self)
{
	free(self->nodes);
	self->nodes = NULL;
}
1769 1770
/* tp_dealloc for parsers.nodetree: drop the index reference taken in
 * ntobj_init, free the trie, then the object itself. */
static void ntobj_dealloc(nodetreeObject *self)
{
	Py_XDECREF(self->nt.index);
	nt_dealloc(&self->nt);
	PyObject_Del(self);
}
1776 1777
/* Methods exposed on parsers.nodetree instances. */
static PyMethodDef ntobj_methods[] = {
    {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
     "insert an index entry"},
    {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
     "find length of shortest hex nodeid of a binary ID"},
    {NULL} /* Sentinel */
};
1784 1785
/* Type object for parsers.nodetree; only dealloc, init and the method
 * table are populated. */
static PyTypeObject nodetreeType = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.nodetree",            /* tp_name */
    sizeof(nodetreeObject),        /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)ntobj_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    0,                             /* tp_as_sequence */
    0,                             /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "nodetree",                    /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    ntobj_methods,                 /* tp_methods */
    0,                             /* tp_members */
    0,                             /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)ntobj_init,          /* tp_init */
    0,                             /* tp_alloc */
};
1824 1825
/* Lazily create the index's embedded nodetree (with nullid pre-inserted)
 * the first time a node lookup is needed. Returns 0, or -1 with an
 * exception set. */
static int index_init_nt(indexObject *self)
{
	if (!self->ntinitialized) {
		if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
			nt_dealloc(&self->nt);
			return -1;
		}
		if (nt_insert(&self->nt, nullid, -1) == -1) {
			nt_dealloc(&self->nt);
			return -1;
		}
		self->ntinitialized = 1;
		/* revisions >= ntrev are not in the tree yet; they are added
		 * lazily by index_find_node / index_populate_nt */
		self->ntrev = (int)index_length(self);
		self->ntlookups = 1;
		self->ntmisses = 0;
	}
	return 0;
}
1843 1844
/*
 * Map a binary node (or prefix of length `nodelen`, capped at 20 bytes)
 * to its revision, filling the radix tree lazily.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: valid rev
 */
static int index_find_node(indexObject *self, const char *node,
                           Py_ssize_t nodelen)
{
	int rev;

	if (index_init_nt(self) == -1)
		return -3;

	self->ntlookups++;
	rev = nt_find(&self->nt, node, nodelen, 0);
	if (rev >= -1)
		return rev;

	/*
	 * For the first handful of lookups, we scan the entire index,
	 * and cache only the matching nodes. This optimizes for cases
	 * like "hg tip", where only a few nodes are accessed.
	 *
	 * After that, we cache every node we visit, using a single
	 * scan amortized over multiple lookups. This gives the best
	 * bulk performance, e.g. for "hg log".
	 */
	if (self->ntmisses++ < 4) {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				if (nt_insert(&self->nt, n, rev) == -1)
					return -3;
				break;
			}
		}
	} else {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (nt_insert(&self->nt, n, rev) == -1) {
				/* remember how far we got before failing */
				self->ntrev = rev + 1;
				return -3;
			}
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				break;
			}
		}
		/* everything from ntrev upward is now cached in the tree */
		self->ntrev = rev;
	}

	if (rev >= 0)
		return rev;
	return -2;
}
1904 1905
1905 1906 static PyObject *index_getitem(indexObject *self, PyObject *value)
1906 1907 {
1907 1908 char *node;
1908 1909 int rev;
1909 1910
1910 1911 if (PyInt_Check(value)) {
1911 1912 long idx;
1912 1913 if (!pylong_to_long(value, &idx)) {
1913 1914 return NULL;
1914 1915 }
1915 1916 return index_get(self, idx);
1916 1917 }
1917 1918
1918 1919 if (node_check(value, &node) == -1)
1919 1920 return NULL;
1920 1921 rev = index_find_node(self, node, 20);
1921 1922 if (rev >= -1)
1922 1923 return PyInt_FromLong(rev);
1923 1924 if (rev == -2)
1924 1925 raise_revlog_error();
1925 1926 return NULL;
1926 1927 }
1927 1928
1928 1929 /*
1929 1930 * Fully populate the radix tree.
1930 1931 */
1931 1932 static int index_populate_nt(indexObject *self)
1932 1933 {
1933 1934 int rev;
1934 1935 if (self->ntrev > 0) {
1935 1936 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1936 1937 const char *n = index_node_existing(self, rev);
1937 1938 if (n == NULL)
1938 1939 return -1;
1939 1940 if (nt_insert(&self->nt, n, rev) == -1)
1940 1941 return -1;
1941 1942 }
1942 1943 self->ntrev = -1;
1943 1944 }
1944 1945 return 0;
1945 1946 }
1946 1947
/* partialmatch(hexprefix) -> 20-byte node, None if absent or non-hex
 * input, or RevlogError on an ambiguous prefix.
 * Note: with PY_SSIZE_T_CLEAN defined (see top of file), the "s#"/"y#"
 * formats write a Py_ssize_t length, hence the type of nodelen. */
static PyObject *index_partialmatch(indexObject *self, PyObject *args)
{
	const char *fullnode;
	Py_ssize_t nodelen;
	char *node;
	int rev, i;

	if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
		return NULL;

	if (nodelen < 1) {
		PyErr_SetString(PyExc_ValueError, "key too short");
		return NULL;
	}

	if (nodelen > 40) {
		PyErr_SetString(PyExc_ValueError, "key too long");
		return NULL;
	}

	/* hexdigit sets a Python error on non-hex characters */
	for (i = 0; i < nodelen; i++)
		hexdigit(node, i);
	if (PyErr_Occurred()) {
		/* input contains non-hex characters */
		PyErr_Clear();
		Py_RETURN_NONE;
	}

	/* prefix search requires the whole tree to be populated */
	if (index_init_nt(self) == -1)
		return NULL;
	if (index_populate_nt(self) == -1)
		return NULL;
	rev = nt_partialmatch(&self->nt, node, nodelen);

	switch (rev) {
	case -4:
		raise_revlog_error();
		return NULL;
	case -2:
		Py_RETURN_NONE;
	case -1:
		return PyBytes_FromStringAndSize(nullid, 20);
	}

	fullnode = index_node_existing(self, rev);
	if (fullnode == NULL) {
		return NULL;
	}
	return PyBytes_FromStringAndSize(fullnode, 20);
}
1997 1998
1998 1999 static PyObject *index_shortest(indexObject *self, PyObject *args)
1999 2000 {
2000 2001 PyObject *val;
2001 2002 char *node;
2002 2003 int length;
2003 2004
2004 2005 if (!PyArg_ParseTuple(args, "O", &val))
2005 2006 return NULL;
2006 2007 if (node_check(val, &node) == -1)
2007 2008 return NULL;
2008 2009
2009 2010 self->ntlookups++;
2010 2011 if (index_init_nt(self) == -1)
2011 2012 return NULL;
2012 2013 if (index_populate_nt(self) == -1)
2013 2014 return NULL;
2014 2015 length = nt_shortest(&self->nt, node);
2015 2016 if (length == -3)
2016 2017 return NULL;
2017 2018 if (length == -2) {
2018 2019 raise_revlog_error();
2019 2020 return NULL;
2020 2021 }
2021 2022 return PyInt_FromLong(length);
2022 2023 }
2023 2024
2024 2025 static PyObject *index_m_get(indexObject *self, PyObject *args)
2025 2026 {
2026 2027 PyObject *val;
2027 2028 char *node;
2028 2029 int rev;
2029 2030
2030 2031 if (!PyArg_ParseTuple(args, "O", &val))
2031 2032 return NULL;
2032 2033 if (node_check(val, &node) == -1)
2033 2034 return NULL;
2034 2035 rev = index_find_node(self, node, 20);
2035 2036 if (rev == -3)
2036 2037 return NULL;
2037 2038 if (rev == -2)
2038 2039 Py_RETURN_NONE;
2039 2040 return PyInt_FromLong(rev);
2040 2041 }
2041 2042
2042 2043 static int index_contains(indexObject *self, PyObject *value)
2043 2044 {
2044 2045 char *node;
2045 2046
2046 2047 if (PyInt_Check(value)) {
2047 2048 long rev;
2048 2049 if (!pylong_to_long(value, &rev)) {
2049 2050 return -1;
2050 2051 }
2051 2052 return rev >= -1 && rev < index_length(self);
2052 2053 }
2053 2054
2054 2055 if (node_check(value, &node) == -1)
2055 2056 return -1;
2056 2057
2057 2058 switch (index_find_node(self, node, 20)) {
2058 2059 case -3:
2059 2060 return -1;
2060 2061 case -2:
2061 2062 return 0;
2062 2063 default:
2063 2064 return 1;
2064 2065 }
2065 2066 }
2066 2067
2067 2068 typedef uint64_t bitmask;
2068 2069
2069 2070 /*
2070 2071 * Given a disjoint set of revs, return all candidates for the
2071 2072 * greatest common ancestor. In revset notation, this is the set
2072 2073 * "heads(::a and ::b and ...)"
2073 2074 */
2074 2075 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
2075 2076 int revcount)
2076 2077 {
2077 2078 const bitmask allseen = (1ull << revcount) - 1;
2078 2079 const bitmask poison = 1ull << revcount;
2079 2080 PyObject *gca = PyList_New(0);
2080 2081 int i, v, interesting;
2081 2082 int maxrev = -1;
2082 2083 bitmask sp;
2083 2084 bitmask *seen;
2084 2085
2085 2086 if (gca == NULL)
2086 2087 return PyErr_NoMemory();
2087 2088
2088 2089 for (i = 0; i < revcount; i++) {
2089 2090 if (revs[i] > maxrev)
2090 2091 maxrev = revs[i];
2091 2092 }
2092 2093
2093 2094 seen = calloc(sizeof(*seen), maxrev + 1);
2094 2095 if (seen == NULL) {
2095 2096 Py_DECREF(gca);
2096 2097 return PyErr_NoMemory();
2097 2098 }
2098 2099
2099 2100 for (i = 0; i < revcount; i++)
2100 2101 seen[revs[i]] = 1ull << i;
2101 2102
2102 2103 interesting = revcount;
2103 2104
2104 2105 for (v = maxrev; v >= 0 && interesting; v--) {
2105 2106 bitmask sv = seen[v];
2106 2107 int parents[2];
2107 2108
2108 2109 if (!sv)
2109 2110 continue;
2110 2111
2111 2112 if (sv < poison) {
2112 2113 interesting -= 1;
2113 2114 if (sv == allseen) {
2114 2115 PyObject *obj = PyInt_FromLong(v);
2115 2116 if (obj == NULL)
2116 2117 goto bail;
2117 2118 if (PyList_Append(gca, obj) == -1) {
2118 2119 Py_DECREF(obj);
2119 2120 goto bail;
2120 2121 }
2121 2122 sv |= poison;
2122 2123 for (i = 0; i < revcount; i++) {
2123 2124 if (revs[i] == v)
2124 2125 goto done;
2125 2126 }
2126 2127 }
2127 2128 }
2128 2129 if (index_get_parents(self, v, parents, maxrev) < 0)
2129 2130 goto bail;
2130 2131
2131 2132 for (i = 0; i < 2; i++) {
2132 2133 int p = parents[i];
2133 2134 if (p == -1)
2134 2135 continue;
2135 2136 sp = seen[p];
2136 2137 if (sv < poison) {
2137 2138 if (sp == 0) {
2138 2139 seen[p] = sv;
2139 2140 interesting++;
2140 2141 } else if (sp != sv)
2141 2142 seen[p] |= sv;
2142 2143 } else {
2143 2144 if (sp && sp < poison)
2144 2145 interesting--;
2145 2146 seen[p] = sv;
2146 2147 }
2147 2148 }
2148 2149 }
2149 2150
2150 2151 done:
2151 2152 free(seen);
2152 2153 return gca;
2153 2154 bail:
2154 2155 free(seen);
2155 2156 Py_XDECREF(gca);
2156 2157 return NULL;
2157 2158 }
2158 2159
2159 2160 /*
2160 2161 * Given a disjoint set of revs, return the subset with the longest
2161 2162 * path to the root.
2162 2163 */
2163 2164 static PyObject *find_deepest(indexObject *self, PyObject *revs)
2164 2165 {
2165 2166 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
2166 2167 static const Py_ssize_t capacity = 24;
2167 2168 int *depth, *interesting = NULL;
2168 2169 int i, j, v, ninteresting;
2169 2170 PyObject *dict = NULL, *keys = NULL;
2170 2171 long *seen = NULL;
2171 2172 int maxrev = -1;
2172 2173 long final;
2173 2174
2174 2175 if (revcount > capacity) {
2175 2176 PyErr_Format(PyExc_OverflowError,
2176 2177 "bitset size (%ld) > capacity (%ld)",
2177 2178 (long)revcount, (long)capacity);
2178 2179 return NULL;
2179 2180 }
2180 2181
2181 2182 for (i = 0; i < revcount; i++) {
2182 2183 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2183 2184 if (n > maxrev)
2184 2185 maxrev = n;
2185 2186 }
2186 2187
2187 2188 depth = calloc(sizeof(*depth), maxrev + 1);
2188 2189 if (depth == NULL)
2189 2190 return PyErr_NoMemory();
2190 2191
2191 2192 seen = calloc(sizeof(*seen), maxrev + 1);
2192 2193 if (seen == NULL) {
2193 2194 PyErr_NoMemory();
2194 2195 goto bail;
2195 2196 }
2196 2197
2197 2198 interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
2198 2199 if (interesting == NULL) {
2199 2200 PyErr_NoMemory();
2200 2201 goto bail;
2201 2202 }
2202 2203
2203 2204 if (PyList_Sort(revs) == -1)
2204 2205 goto bail;
2205 2206
2206 2207 for (i = 0; i < revcount; i++) {
2207 2208 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2208 2209 long b = 1l << i;
2209 2210 depth[n] = 1;
2210 2211 seen[n] = b;
2211 2212 interesting[b] = 1;
2212 2213 }
2213 2214
2214 2215 /* invariant: ninteresting is the number of non-zero entries in
2215 2216 * interesting. */
2216 2217 ninteresting = (int)revcount;
2217 2218
2218 2219 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
2219 2220 int dv = depth[v];
2220 2221 int parents[2];
2221 2222 long sv;
2222 2223
2223 2224 if (dv == 0)
2224 2225 continue;
2225 2226
2226 2227 sv = seen[v];
2227 2228 if (index_get_parents(self, v, parents, maxrev) < 0)
2228 2229 goto bail;
2229 2230
2230 2231 for (i = 0; i < 2; i++) {
2231 2232 int p = parents[i];
2232 2233 long sp;
2233 2234 int dp;
2234 2235
2235 2236 if (p == -1)
2236 2237 continue;
2237 2238
2238 2239 dp = depth[p];
2239 2240 sp = seen[p];
2240 2241 if (dp <= dv) {
2241 2242 depth[p] = dv + 1;
2242 2243 if (sp != sv) {
2243 2244 interesting[sv] += 1;
2244 2245 seen[p] = sv;
2245 2246 if (sp) {
2246 2247 interesting[sp] -= 1;
2247 2248 if (interesting[sp] == 0)
2248 2249 ninteresting -= 1;
2249 2250 }
2250 2251 }
2251 2252 } else if (dv == dp - 1) {
2252 2253 long nsp = sp | sv;
2253 2254 if (nsp == sp)
2254 2255 continue;
2255 2256 seen[p] = nsp;
2256 2257 interesting[sp] -= 1;
2257 2258 if (interesting[sp] == 0)
2258 2259 ninteresting -= 1;
2259 2260 if (interesting[nsp] == 0)
2260 2261 ninteresting += 1;
2261 2262 interesting[nsp] += 1;
2262 2263 }
2263 2264 }
2264 2265 interesting[sv] -= 1;
2265 2266 if (interesting[sv] == 0)
2266 2267 ninteresting -= 1;
2267 2268 }
2268 2269
2269 2270 final = 0;
2270 2271 j = ninteresting;
2271 2272 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
2272 2273 if (interesting[i] == 0)
2273 2274 continue;
2274 2275 final |= i;
2275 2276 j -= 1;
2276 2277 }
2277 2278 if (final == 0) {
2278 2279 keys = PyList_New(0);
2279 2280 goto bail;
2280 2281 }
2281 2282
2282 2283 dict = PyDict_New();
2283 2284 if (dict == NULL)
2284 2285 goto bail;
2285 2286
2286 2287 for (i = 0; i < revcount; i++) {
2287 2288 PyObject *key;
2288 2289
2289 2290 if ((final & (1 << i)) == 0)
2290 2291 continue;
2291 2292
2292 2293 key = PyList_GET_ITEM(revs, i);
2293 2294 Py_INCREF(key);
2294 2295 Py_INCREF(Py_None);
2295 2296 if (PyDict_SetItem(dict, key, Py_None) == -1) {
2296 2297 Py_DECREF(key);
2297 2298 Py_DECREF(Py_None);
2298 2299 goto bail;
2299 2300 }
2300 2301 }
2301 2302
2302 2303 keys = PyDict_Keys(dict);
2303 2304
2304 2305 bail:
2305 2306 free(depth);
2306 2307 free(seen);
2307 2308 free(interesting);
2308 2309 Py_XDECREF(dict);
2309 2310
2310 2311 return keys;
2311 2312 }
2312 2313
2313 2314 /*
2314 2315 * Given a (possibly overlapping) set of revs, return all the
2315 2316 * common ancestors heads: heads(::args[0] and ::a[1] and ...)
2316 2317 */
static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
{
	PyObject *ret = NULL;
	Py_ssize_t argcount, i, len;
	bitmask repeat = 0; /* cheap 64-bit bloom filter over rev & 0x3f */
	int revcount = 0;
	int *revs;

	argcount = PySequence_Length(args);
	revs = PyMem_Malloc(argcount * sizeof(*revs));
	if (argcount > 0 && revs == NULL)
		return PyErr_NoMemory();
	len = index_length(self);

	/* validate and deduplicate the input revisions */
	for (i = 0; i < argcount; i++) {
		static const int capacity = 24;
		PyObject *obj = PySequence_GetItem(args, i);
		bitmask x;
		long val;

		if (!PyInt_Check(obj)) {
			PyErr_SetString(PyExc_TypeError,
			                "arguments must all be ints");
			Py_DECREF(obj);
			goto bail;
		}
		val = PyInt_AsLong(obj);
		Py_DECREF(obj);
		if (val == -1) {
			/* nullrev is an ancestor of everything: the only
			 * common-ancestor head is the empty set */
			ret = PyList_New(0);
			goto done;
		}
		if (val < 0 || val >= len) {
			PyErr_SetString(PyExc_IndexError, "index out of range");
			goto bail;
		}
		/* this cheesy bloom filter lets us avoid some more
		 * expensive duplicate checks in the common set-is-disjoint
		 * case */
		x = 1ull << (val & 0x3f);
		if (repeat & x) {
			int k;
			for (k = 0; k < revcount; k++) {
				if (val == revs[k])
					goto duplicate;
			}
		} else
			repeat |= x;
		if (revcount >= capacity) {
			/* find_gca_candidates assigns one bitmask bit per
			 * rev, so it cannot handle more than 24 inputs */
			PyErr_Format(PyExc_OverflowError,
			             "bitset size (%d) > capacity (%d)",
			             revcount, capacity);
			goto bail;
		}
		revs[revcount++] = (int)val;
	duplicate:;
	}

	/* trivial cases: 0 or 1 distinct input rev */
	if (revcount == 0) {
		ret = PyList_New(0);
		goto done;
	}
	if (revcount == 1) {
		PyObject *obj;
		ret = PyList_New(1);
		if (ret == NULL)
			goto bail;
		obj = PyInt_FromLong(revs[0]);
		if (obj == NULL)
			goto bail;
		PyList_SET_ITEM(ret, 0, obj);
		goto done;
	}

	ret = find_gca_candidates(self, revs, revcount);
	if (ret == NULL)
		goto bail;

done:
	PyMem_Free(revs);
	return ret;

bail:
	PyMem_Free(revs);
	Py_XDECREF(ret);
	return NULL;
}
2404 2405
2405 2406 /*
2406 2407 * Given a (possibly overlapping) set of revs, return the greatest
2407 2408 * common ancestors: those with the longest path to the root.
2408 2409 */
2409 2410 static PyObject *index_ancestors(indexObject *self, PyObject *args)
2410 2411 {
2411 2412 PyObject *ret;
2412 2413 PyObject *gca = index_commonancestorsheads(self, args);
2413 2414 if (gca == NULL)
2414 2415 return NULL;
2415 2416
2416 2417 if (PyList_GET_SIZE(gca) <= 1) {
2417 2418 return gca;
2418 2419 }
2419 2420
2420 2421 ret = find_deepest(self, gca);
2421 2422 Py_DECREF(gca);
2422 2423 return ret;
2423 2424 }
2424 2425
2425 2426 /*
2426 2427 * Invalidate any trie entries introduced by added revs.
2427 2428 */
static void index_invalidate_added(indexObject *self, Py_ssize_t start)
{
	Py_ssize_t i, len = PyList_GET_SIZE(self->added);

	/* remove from the node trie every node introduced by the added
	 * entries from position "start" onwards (tuple slot 7 holds the
	 * binary node of an added entry) */
	for (i = start; i < len; i++) {
		PyObject *tuple = PyList_GET_ITEM(self->added, i);
		PyObject *node = PyTuple_GET_ITEM(tuple, 7);

		nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
	}

	/* dropping everything: release the added list entirely */
	if (start == 0)
		Py_CLEAR(self->added);
}
2442 2443
2443 2444 /*
2444 2445 * Delete a numeric range of revs, which must be at the end of the
2445 2446 * range, but exclude the sentinel nullid entry.
2446 2447 */
static int index_slice_del(indexObject *self, PyObject *item)
{
	Py_ssize_t start, stop, step, slicelength;
	/* +1 to account for the nullid sentinel entry */
	Py_ssize_t length = index_length(self) + 1;
	int ret = 0;

/* Argument changed from PySliceObject* to PyObject* in Python 3. */
#ifdef IS_PY3K
	if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
	                         &slicelength) < 0)
#else
	if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
	                         &step, &slicelength) < 0)
#endif
		return -1;

	if (slicelength <= 0)
		return 0;

	if ((step < 0 && start < stop) || (step > 0 && start > stop))
		stop = start;

	/* normalize a negative-step slice to the equivalent forward one */
	if (step < 0) {
		stop = start + 1;
		start = stop + step * (slicelength - 1) - 1;
		step = -step;
	}

	/* only contiguous deletions at the end of the index make sense
	 * for a revlog */
	if (step != 1) {
		PyErr_SetString(PyExc_ValueError,
		                "revlog index delete requires step size of 1");
		return -1;
	}

	if (stop != length - 1) {
		PyErr_SetString(PyExc_IndexError,
		                "revlog index deletion indices are invalid");
		return -1;
	}

	if (start < self->length) {
		/* deletion reaches into the on-disk portion of the index */
		if (self->ntinitialized) {
			Py_ssize_t i;

			/* drop the truncated on-disk nodes from the trie */
			for (i = start + 1; i < self->length; i++) {
				const char *node = index_node_existing(self, i);
				if (node == NULL)
					return -1;

				nt_delete_node(&self->nt, node);
			}
			/* all in-memory entries come after the on-disk
			 * ones, so they all go as well */
			if (self->added)
				index_invalidate_added(self, 0);
			if (self->ntrev > start)
				self->ntrev = (int)start;
		}
		self->length = start;
		if (start < self->raw_length) {
			/* invalidate cached entry tuples past the cut */
			if (self->cache) {
				Py_ssize_t i;
				for (i = start; i < self->raw_length; i++)
					Py_CLEAR(self->cache[i]);
			}
			self->raw_length = start;
		}
		goto done;
	}

	/* deletion only affects in-memory (added) entries */
	if (self->ntinitialized) {
		index_invalidate_added(self, start - self->length);
		if (self->ntrev > start)
			self->ntrev = (int)start;
	}
	if (self->added)
		ret = PyList_SetSlice(self->added, start - self->length,
		                      PyList_GET_SIZE(self->added), NULL);
done:
	/* head computation is no longer valid after truncation */
	Py_CLEAR(self->headrevs);
	return ret;
}
2527 2528
2528 2529 /*
2529 2530 * Supported ops:
2530 2531 *
2531 2532 * slice deletion
2532 2533 * string assignment (extend node->rev mapping)
2533 2534 * string deletion (shrink node->rev mapping)
2534 2535 */
static int index_assign_subscript(indexObject *self, PyObject *item,
                                  PyObject *value)
{
	char *node;
	long rev;

	/* slice deletion: del index[i:j] */
	if (PySlice_Check(item) && value == NULL)
		return index_slice_del(self, item);

	if (node_check(item, &node) == -1)
		return -1;

	if (value == NULL)
		/* node deletion: drop it from the trie (no-op when the
		 * trie was never built) */
		return self->ntinitialized ? nt_delete_node(&self->nt, node)
		                           : 0;
	rev = PyInt_AsLong(value);
	if (rev > INT_MAX || rev < 0) {
		/* also reached when PyInt_AsLong failed with -1 and set
		 * an exception — preserve that exception */
		if (!PyErr_Occurred())
			PyErr_SetString(PyExc_ValueError, "rev out of range");
		return -1;
	}

	if (index_init_nt(self) == -1)
		return -1;
	return nt_insert(&self->nt, node, (int)rev);
}
2561 2562
2562 2563 /*
2563 2564 * Find all RevlogNG entries in an index that has inline data. Update
2564 2565 * the optional "offsets" table with those entries.
2565 2566 */
static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
{
	const char *data = (const char *)self->buf.buf;
	Py_ssize_t pos = 0;
	Py_ssize_t end = self->buf.len;
	long incr = v1_hdrsize;
	Py_ssize_t len = 0;

	/* each record is a fixed-size header followed, inline, by its
	 * compressed revision data; "pos >= 0" guards against a bogus
	 * comp_len wrapping pos past the end */
	while (pos + v1_hdrsize <= end && pos >= 0) {
		uint32_t comp_len;
		/* 3rd element of header is length of compressed inline data */
		comp_len = getbe32(data + pos + 8);
		incr = v1_hdrsize + comp_len;
		if (offsets)
			offsets[len] = data + pos;
		len++;
		pos += incr;
	}

	/* a well-formed inline index is consumed exactly */
	if (pos != end) {
		if (!PyErr_Occurred())
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
		return -1;
	}

	return len;
}
2593 2594
static int index_init(indexObject *self, PyObject *args)
{
	PyObject *data_obj, *inlined_obj;
	Py_ssize_t size;

	/* Initialize before argument-checking to avoid index_dealloc() crash.
	 */
	self->raw_length = 0;
	self->added = NULL;
	self->cache = NULL;
	self->data = NULL;
	memset(&self->buf, 0, sizeof(self->buf));
	self->headrevs = NULL;
	self->filteredrevs = Py_None;
	Py_INCREF(Py_None);
	self->ntinitialized = 0;
	self->offsets = NULL;

	/* args is (data, inlined): the raw index bytes and whether
	 * revision data is stored inline between the records */
	if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
		return -1;
	if (!PyObject_CheckBuffer(data_obj)) {
		PyErr_SetString(PyExc_TypeError,
		                "data does not support buffer interface");
		return -1;
	}

	/* borrow the raw bytes via the buffer protocol; released in
	 * index_dealloc() */
	if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
		return -1;
	size = self->buf.len;

	self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
	self->data = data_obj;

	self->ntlookups = self->ntmisses = 0;
	self->ntrev = -1;
	Py_INCREF(self->data);

	if (self->inlined) {
		/* records are variable-sized; scan to count them */
		Py_ssize_t len = inline_scan(self, NULL);
		if (len == -1)
			goto bail;
		self->raw_length = len;
		self->length = len;
	} else {
		/* fixed-size records: the length is arithmetic */
		if (size % v1_hdrsize) {
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
			goto bail;
		}
		self->raw_length = size / v1_hdrsize;
		self->length = self->raw_length;
	}

	return 0;
bail:
	return -1;
}
2650 2651
2651 2652 static PyObject *index_nodemap(indexObject *self)
2652 2653 {
2653 2654 Py_INCREF(self);
2654 2655 return (PyObject *)self;
2655 2656 }
2656 2657
/* Release every derived/cached structure: entry-tuple cache, inline
 * offset table, node trie and cached head revisions. */
static void _index_clearcaches(indexObject *self)
{
	if (self->cache) {
		Py_ssize_t i;

		for (i = 0; i < self->raw_length; i++)
			Py_CLEAR(self->cache[i]);
		free(self->cache);
		self->cache = NULL;
	}
	if (self->offsets) {
		PyMem_Free((void *)self->offsets);
		self->offsets = NULL;
	}
	if (self->ntinitialized) {
		nt_dealloc(&self->nt);
	}
	self->ntinitialized = 0;
	Py_CLEAR(self->headrevs);
}
2677 2678
2678 2679 static PyObject *index_clearcaches(indexObject *self)
2679 2680 {
2680 2681 _index_clearcaches(self);
2681 2682 self->ntrev = -1;
2682 2683 self->ntlookups = self->ntmisses = 0;
2683 2684 Py_RETURN_NONE;
2684 2685 }
2685 2686
static void index_dealloc(indexObject *self)
{
	_index_clearcaches(self);
	Py_XDECREF(self->filteredrevs);
	/* release the buffer view before dropping the owning object */
	if (self->buf.buf) {
		PyBuffer_Release(&self->buf);
		memset(&self->buf, 0, sizeof(self->buf));
	}
	Py_XDECREF(self->data);
	Py_XDECREF(self->added);
	PyObject_Del(self);
}
2698 2699
/* Sequence protocol: len(index), index[rev] and "x in index". */
static PySequenceMethods index_sequence_methods = {
    (lenfunc)index_length,      /* sq_length */
    0,                          /* sq_concat */
    0,                          /* sq_repeat */
    (ssizeargfunc)index_get,    /* sq_item */
    0,                          /* sq_slice */
    0,                          /* sq_ass_item */
    0,                          /* sq_ass_slice */
    (objobjproc)index_contains, /* sq_contains */
};

/* Mapping protocol: index[node] lookup and assignment/slice deletion. */
static PyMappingMethods index_mapping_methods = {
    (lenfunc)index_length,                 /* mp_length */
    (binaryfunc)index_getitem,             /* mp_subscript */
    (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
};
2715 2716
/* Method table for parsers.index instances. */
static PyMethodDef index_methods[] = {
    {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
     "return the gca set of the given revs"},
    {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
     METH_VARARGS,
     "return the heads of the common ancestors of the given revs"},
    {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
     "clear the index caches"},
    {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
    {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
     "compute phases"},
    {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
     "reachableroots"},
    {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
     "get head revisions"}, /* Can do filtering since 3.2 */
    {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
     "get filtered head revisions"}, /* Can always do filtering */
    {"issnapshot", (PyCFunction)index_issnapshot, METH_O,
     "True if the object is a snapshot"},
    {"findsnapshots", (PyCFunction)index_findsnapshots, METH_VARARGS,
     "Gather snapshot data in a cache dict"},
    {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
     "determine revisions with deltas to reconstruct fulltext"},
    {"slicechunktodensity", (PyCFunction)index_slicechunktodensity,
     METH_VARARGS, "determine revisions with deltas to reconstruct fulltext"},
    {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
    {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
     "match a potentially ambiguous node ID"},
    {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
     "find length of shortest hex nodeid of a binary ID"},
    {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
    {NULL} /* Sentinel */
};
2749 2750
/* Attribute table: "nodemap" is a read-only alias for the index itself. */
static PyGetSetDef index_getset[] = {
    {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
    {NULL} /* Sentinel */
};
2754 2755
/* Type object for parsers.index.  tp_new is filled in at module-init
 * time (revlog_module_init). */
PyTypeObject HgRevlogIndex_Type = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.index",               /* tp_name */
    sizeof(indexObject),           /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)index_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    &index_sequence_methods,       /* tp_as_sequence */
    &index_mapping_methods,        /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "revlog index",                /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    index_methods,                 /* tp_methods */
    0,                             /* tp_members */
    index_getset,                  /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)index_init,          /* tp_init */
    0,                             /* tp_alloc */
};
2794 2795
2795 2796 /*
2796 2797 * returns a tuple of the form (index, index, cache) with elements as
2797 2798 * follows:
2798 2799 *
2799 2800 * index: an index object that lazily parses RevlogNG records
2800 2801 * cache: if data is inlined, a tuple (0, index_file_content), else None
2801 2802 * index_file_content could be a string, or a buffer
2802 2803 *
2803 2804 * added complications are for backwards compatibility
2804 2805 */
PyObject *parse_index2(PyObject *self, PyObject *args)
{
	PyObject *tuple = NULL, *cache = NULL;
	indexObject *idx;
	int ret;

	/* allocate the index and initialize it from (data, inlined) */
	idx = PyObject_New(indexObject, &HgRevlogIndex_Type);
	if (idx == NULL)
		goto bail;

	ret = index_init(idx, args);
	if (ret == -1)
		goto bail;

	if (idx->inlined) {
		/* inline revlog: hand (0, data) back to the caller for use
		 * as the chunk cache */
		cache = Py_BuildValue("iO", 0, idx->data);
		if (cache == NULL)
			goto bail;
	} else {
		cache = Py_None;
		Py_INCREF(cache);
	}

	/* "NN" steals the references to idx and cache on success */
	tuple = Py_BuildValue("NN", idx, cache);
	if (!tuple)
		goto bail;
	return tuple;

bail:
	Py_XDECREF(idx);
	Py_XDECREF(cache);
	Py_XDECREF(tuple);
	return NULL;
}
2839 2840
2840 2841 #ifdef WITH_RUST
2841 2842
2842 2843 /* rustlazyancestors: iteration over ancestors implemented in Rust
2843 2844 *
2844 2845 * This class holds a reference to an index and to the Rust iterator.
2845 2846 */
2846 2847 typedef struct rustlazyancestorsObjectStruct rustlazyancestorsObject;
2847 2848
2848 2849 struct rustlazyancestorsObjectStruct {
2849 2850 PyObject_HEAD
2850 2851 /* Type-specific fields go here. */
2851 2852 indexObject *index; /* Ref kept to avoid GC'ing the index */
2852 2853 void *iter; /* Rust iterator */
2853 2854 };
2854 2855
2855 2856 /* FFI exposed from Rust code */
2856 2857 rustlazyancestorsObject *rustlazyancestors_init(indexObject *index,
2857 2858 /* intrevs vector */
2858 2859 Py_ssize_t initrevslen,
2859 2860 long *initrevs, long stoprev,
2860 2861 int inclusive);
2861 2862 void rustlazyancestors_drop(rustlazyancestorsObject *self);
2862 2863 int rustlazyancestors_next(rustlazyancestorsObject *self);
2863 2864 int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev);
2864 2865
2865 2866 /* CPython instance methods */
2866 2867 static int rustla_init(rustlazyancestorsObject *self, PyObject *args)
2867 2868 {
2868 2869 PyObject *initrevsarg = NULL;
2869 2870 PyObject *inclusivearg = NULL;
2870 2871 long stoprev = 0;
2871 2872 long *initrevs = NULL;
2872 2873 int inclusive = 0;
2873 2874 Py_ssize_t i;
2874 2875
2875 2876 indexObject *index;
2876 2877 if (!PyArg_ParseTuple(args, "O!O!lO!", &HgRevlogIndex_Type, &index,
2877 2878 &PyList_Type, &initrevsarg, &stoprev,
2878 2879 &PyBool_Type, &inclusivearg))
2879 2880 return -1;
2880 2881
2881 2882 Py_INCREF(index);
2882 2883 self->index = index;
2883 2884
2884 2885 if (inclusivearg == Py_True)
2885 2886 inclusive = 1;
2886 2887
2887 2888 Py_ssize_t linit = PyList_GET_SIZE(initrevsarg);
2888 2889
2889 2890 initrevs = (long *)calloc(linit, sizeof(long));
2890 2891
2891 2892 if (initrevs == NULL) {
2892 2893 PyErr_NoMemory();
2893 2894 goto bail;
2894 2895 }
2895 2896
2896 2897 for (i = 0; i < linit; i++) {
2897 2898 initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i));
2898 2899 }
2899 2900 if (PyErr_Occurred())
2900 2901 goto bail;
2901 2902
2902 2903 self->iter =
2903 2904 rustlazyancestors_init(index, linit, initrevs, stoprev, inclusive);
2904 2905 if (self->iter == NULL) {
2905 2906 /* if this is because of GraphError::ParentOutOfRange
2906 2907 * HgRevlogIndex_GetParents() has already set the proper
2907 2908 * exception */
2908 2909 goto bail;
2909 2910 }
2910 2911
2911 2912 free(initrevs);
2912 2913 return 0;
2913 2914
2914 2915 bail:
2915 2916 free(initrevs);
2916 2917 return -1;
2917 2918 };
2918 2919
static void rustla_dealloc(rustlazyancestorsObject *self)
{
	Py_XDECREF(self->index);
	/* hand the Rust iterator back to Rust for destruction */
	if (self->iter != NULL) { /* can happen if rustla_init failed */
		rustlazyancestors_drop(self->iter);
	}
	PyObject_Del(self);
}
2927 2928
/* tp_iternext: return the next ancestor rev, or NULL when exhausted. */
static PyObject *rustla_next(rustlazyancestorsObject *self)
{
	int res = rustlazyancestors_next(self->iter);
	if (res == -1) {
		/* Setting an explicit exception seems unnecessary
		 * as examples from Python source code (Objects/rangeobjets.c
		 * and Modules/_io/stringio.c) seem to demonstrate.
		 */
		return NULL;
	}
	return PyInt_FromLong(res);
}
2940 2941
2941 2942 static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev)
2942 2943 {
2943 2944 long lrev;
2944 2945 if (!pylong_to_long(rev, &lrev)) {
2945 2946 PyErr_Clear();
2946 2947 return 0;
2947 2948 }
2948 2949 return rustlazyancestors_contains(self->iter, lrev);
2949 2950 }
2950 2951
/* Sequence protocol: only membership ("rev in iterator") is supported. */
static PySequenceMethods rustla_sequence_methods = {
    0,                           /* sq_length */
    0,                           /* sq_concat */
    0,                           /* sq_repeat */
    0,                           /* sq_item */
    0,                           /* sq_slice */
    0,                           /* sq_ass_item */
    0,                           /* sq_ass_slice */
    (objobjproc)rustla_contains, /* sq_contains */
};
2961 2962
/* Type object for parsers.rustlazyancestors.  tp_new is filled in at
 * module-init time (revlog_module_init). */
static PyTypeObject rustlazyancestorsType = {
    PyVarObject_HEAD_INIT(NULL, 0)                  /* header */
    "parsers.rustlazyancestors",                    /* tp_name */
    sizeof(rustlazyancestorsObject),                /* tp_basicsize */
    0,                                              /* tp_itemsize */
    (destructor)rustla_dealloc,                     /* tp_dealloc */
    0,                                              /* tp_print */
    0,                                              /* tp_getattr */
    0,                                              /* tp_setattr */
    0,                                              /* tp_compare */
    0,                                              /* tp_repr */
    0,                                              /* tp_as_number */
    &rustla_sequence_methods,                       /* tp_as_sequence */
    0,                                              /* tp_as_mapping */
    0,                                              /* tp_hash */
    0,                                              /* tp_call */
    0,                                              /* tp_str */
    0,                                              /* tp_getattro */
    0,                                              /* tp_setattro */
    0,                                              /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,                             /* tp_flags */
    "Iterator over ancestors, implemented in Rust", /* tp_doc */
    0,                                              /* tp_traverse */
    0,                                              /* tp_clear */
    0,                                              /* tp_richcompare */
    0,                                              /* tp_weaklistoffset */
    0,                                              /* tp_iter */
    (iternextfunc)rustla_next,                      /* tp_iternext */
    0,                                              /* tp_methods */
    0,                                              /* tp_members */
    0,                                              /* tp_getset */
    0,                                              /* tp_base */
    0,                                              /* tp_dict */
    0,                                              /* tp_descr_get */
    0,                                              /* tp_descr_set */
    0,                                              /* tp_dictoffset */
    (initproc)rustla_init,                          /* tp_init */
    0,                                              /* tp_alloc */
};
3001 3002 #endif /* WITH_RUST */
3002 3003
/* Register the revlog types and shared C API with the parsers module. */
void revlog_module_init(PyObject *mod)
{
	PyObject *caps = NULL;
	HgRevlogIndex_Type.tp_new = PyType_GenericNew;
	if (PyType_Ready(&HgRevlogIndex_Type) < 0)
		return;
	Py_INCREF(&HgRevlogIndex_Type);
	PyModule_AddObject(mod, "index", (PyObject *)&HgRevlogIndex_Type);

	nodetreeType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&nodetreeType) < 0)
		return;
	Py_INCREF(&nodetreeType);
	PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);

	/* the shared sentinel entry for the null revision: all-zero fields
	 * and the 20-byte nullid (PY23 picks "s#" or "y#" per version) */
	if (!nullentry) {
		nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0,
		                          0, -1, -1, -1, -1, nullid, 20);
	}
	/* immortal module-lifetime object; keep it out of the GC */
	if (nullentry)
		PyObject_GC_UnTrack(nullentry);

	/* export the parents lookup as a capsule so other extensions
	 * (e.g. the Rust bindings) can call it without linking */
	caps = PyCapsule_New(HgRevlogIndex_GetParents,
	                     "mercurial.cext.parsers.index_get_parents_CAPI",
	                     NULL);
	if (caps != NULL)
		PyModule_AddObject(mod, "index_get_parents_CAPI", caps);

#ifdef WITH_RUST
	rustlazyancestorsType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&rustlazyancestorsType) < 0)
		return;
	Py_INCREF(&rustlazyancestorsType);
	PyModule_AddObject(mod, "rustlazyancestors",
	                   (PyObject *)&rustlazyancestorsType);
#endif
}
General Comments 0
You need to be logged in to leave comments. Login now