revlog: fix excessive decref on tuple creation failure in parse_index2()...
Yuya Nishihara
r45733:2bc5d153 default
@@ -1,2956 +1,2952 @@
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #define PY_SSIZE_T_CLEAN
11 11 #include <Python.h>
12 12 #include <assert.h>
13 13 #include <ctype.h>
14 14 #include <limits.h>
15 15 #include <stddef.h>
16 16 #include <stdlib.h>
17 17 #include <string.h>
18 18
19 19 #include "bitmanipulation.h"
20 20 #include "charencode.h"
21 21 #include "revlog.h"
22 22 #include "util.h"
23 23
24 24 #ifdef IS_PY3K
25 25 /* The mapping of Python types is meant to be temporary to get Python
26 26 * 3 to compile. We should remove this once Python 3 is fully
27 27 * supported and proper types are used in the extensions themselves. */
28 28 #define PyInt_Check PyLong_Check
29 29 #define PyInt_FromLong PyLong_FromLong
30 30 #define PyInt_FromSsize_t PyLong_FromSsize_t
31 31 #define PyInt_AsLong PyLong_AsLong
32 32 #endif
33 33
34 34 typedef struct indexObjectStruct indexObject;
35 35
36 36 typedef struct {
37 37 int children[16];
38 38 } nodetreenode;
39 39
40 40 typedef struct {
41 41 int abi_version;
42 42 Py_ssize_t (*index_length)(const indexObject *);
43 43 const char *(*index_node)(indexObject *, Py_ssize_t);
44 44 int (*index_parents)(PyObject *, int, int *);
45 45 } Revlog_CAPI;
46 46
47 47 /*
48 48 * A base-16 trie for fast node->rev mapping.
49 49 *
50 50 * Positive value is index of the next node in the trie
51 51 * Negative value is a leaf: -(rev + 2)
52 52 * Zero is empty
53 53 */
54 54 typedef struct {
55 55 indexObject *index;
56 56 nodetreenode *nodes;
57 57 unsigned length; /* # nodes in use */
58 58 unsigned capacity; /* # nodes allocated */
59 59 int depth; /* maximum depth of tree */
60 60 int splits; /* # splits performed */
61 61 } nodetree;
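/*
 * Illustrative sketch (not part of the upstream file): how the children[]
 * value encoding described above round-trips. A leaf holding revision
 * `rev` is stored as -(rev + 2), so rev 0 becomes -2 and rev 5 becomes -7;
 * the sentinel rev == -2 used by nt_delete_node() below encodes to 0,
 * which is the same as "empty".
 */
static inline int nt_encode_leaf_example(int rev)
{
	return -rev - 2; /* equivalently -(rev + 2) */
}

static inline int nt_decode_leaf_example(int v)
{
	return -(v + 2); /* only meaningful when v < 0 (a leaf) */
}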
62 62
63 63 typedef struct {
64 64 PyObject_HEAD /* ; */
65 65 nodetree nt;
66 66 } nodetreeObject;
67 67
68 68 /*
69 69 * This class has two behaviors.
70 70 *
71 71 * When used in a list-like way (with integer keys), we decode an
72 72 * entry in a RevlogNG index file on demand. We have limited support for
73 73 * integer-keyed insert and delete, only at elements right before the
74 74 * end.
75 75 *
76 76 * With string keys, we lazily perform a reverse mapping from node to
77 77 * rev, using a base-16 trie.
78 78 */
79 79 struct indexObjectStruct {
80 80 PyObject_HEAD
81 81 /* Type-specific fields go here. */
82 82 PyObject *data; /* raw bytes of index */
83 83 Py_buffer buf; /* buffer of data */
84 84 PyObject **cache; /* cached tuples */
85 85 const char **offsets; /* populated on demand */
86 86 Py_ssize_t raw_length; /* original number of elements */
87 87 Py_ssize_t length; /* current number of elements */
88 88 PyObject *added; /* populated on demand */
89 89 PyObject *headrevs; /* cache, invalidated on changes */
90 90 PyObject *filteredrevs; /* filtered revs set */
91 91 nodetree nt; /* base-16 trie */
92 92 int ntinitialized; /* 0 or 1 */
93 93 int ntrev; /* last rev scanned */
94 94 int ntlookups; /* # lookups */
95 95 int ntmisses; /* # lookups that miss the cache */
96 96 int inlined;
97 97 };
98 98
99 99 static Py_ssize_t index_length(const indexObject *self)
100 100 {
101 101 if (self->added == NULL)
102 102 return self->length;
103 103 return self->length + PyList_GET_SIZE(self->added);
104 104 }
105 105
106 106 static PyObject *nullentry = NULL;
107 107 static const char nullid[20] = {0};
108 108 static const Py_ssize_t nullrev = -1;
109 109
110 110 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
111 111
112 112 static int index_find_node(indexObject *self, const char *node,
113 113 Py_ssize_t nodelen);
114 114
115 115 #if LONG_MAX == 0x7fffffffL
116 116 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
117 117 #else
118 118 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
119 119 #endif
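/*
 * Added note (not in the upstream file): in the Py_BuildValue format above,
 * "K" packs an unsigned long long and "k" an unsigned long (chosen to match
 * the platform's LONG_MAX) for offset_flags, the six "i" codes are
 * comp_len, uncomp_len, base_rev, link_rev, parent_1 and parent_2, and
 * "s#" (Python 2) / "y#" (Python 3) is the 20-byte node id passed with an
 * explicit Py_ssize_t length (PY_SSIZE_T_CLEAN is defined above).
 */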
120 120
121 121 /* A RevlogNG v1 index entry is 64 bytes long. */
122 122 static const long v1_hdrsize = 64;
123 123
124 124 static void raise_revlog_error(void)
125 125 {
126 126 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
127 127
128 128 mod = PyImport_ImportModule("mercurial.error");
129 129 if (mod == NULL) {
130 130 goto cleanup;
131 131 }
132 132
133 133 dict = PyModule_GetDict(mod);
134 134 if (dict == NULL) {
135 135 goto cleanup;
136 136 }
137 137 Py_INCREF(dict);
138 138
139 139 errclass = PyDict_GetItemString(dict, "RevlogError");
140 140 if (errclass == NULL) {
141 141 PyErr_SetString(PyExc_SystemError,
142 142 "could not find RevlogError");
143 143 goto cleanup;
144 144 }
145 145
146 146 /* value of exception is ignored by callers */
147 147 PyErr_SetString(errclass, "RevlogError");
148 148
149 149 cleanup:
150 150 Py_XDECREF(dict);
151 151 Py_XDECREF(mod);
152 152 }
153 153
154 154 /*
155 155 * Return a pointer to the beginning of a RevlogNG record.
156 156 */
157 157 static const char *index_deref(indexObject *self, Py_ssize_t pos)
158 158 {
159 159 if (self->inlined && pos > 0) {
160 160 if (self->offsets == NULL) {
161 161 Py_ssize_t ret;
162 162 self->offsets = PyMem_Malloc(self->raw_length *
163 163 sizeof(*self->offsets));
164 164 if (self->offsets == NULL)
165 165 return (const char *)PyErr_NoMemory();
166 166 ret = inline_scan(self, self->offsets);
167 167 if (ret == -1) {
168 168 return NULL;
169 169 };
170 170 }
171 171 return self->offsets[pos];
172 172 }
173 173
174 174 return (const char *)(self->buf.buf) + pos * v1_hdrsize;
175 175 }
176 176
177 177 /*
178 178 * Get parents of the given rev.
179 179 *
180 180 * The specified rev must be valid and must not be nullrev. A returned
181 181 * parent revision may be nullrev, but is guaranteed to be in valid range.
182 182 */
183 183 static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
184 184 int maxrev)
185 185 {
186 186 if (rev >= self->length) {
187 187 long tmp;
188 188 PyObject *tuple =
189 189 PyList_GET_ITEM(self->added, rev - self->length);
190 190 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 5), &tmp)) {
191 191 return -1;
192 192 }
193 193 ps[0] = (int)tmp;
194 194 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 6), &tmp)) {
195 195 return -1;
196 196 }
197 197 ps[1] = (int)tmp;
198 198 } else {
199 199 const char *data = index_deref(self, rev);
200 200 ps[0] = getbe32(data + 24);
201 201 ps[1] = getbe32(data + 28);
202 202 }
203 203 /* If index file is corrupted, ps[] may point to invalid revisions. So
204 204 * there is a risk of buffer overflow if we trust them unconditionally. */
205 205 if (ps[0] < -1 || ps[0] > maxrev || ps[1] < -1 || ps[1] > maxrev) {
206 206 PyErr_SetString(PyExc_ValueError, "parent out of range");
207 207 return -1;
208 208 }
209 209 return 0;
210 210 }
211 211
212 212 /*
213 213 * Get parents of the given rev.
214 214 *
215 215 * If the specified rev is out of range, IndexError will be raised. If the
216 216 * revlog entry is corrupted, ValueError may be raised.
217 217 *
218 218 * Returns 0 on success or -1 on failure.
219 219 */
220 220 static int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps)
221 221 {
222 222 int tiprev;
223 223 if (!op || !HgRevlogIndex_Check(op) || !ps) {
224 224 PyErr_BadInternalCall();
225 225 return -1;
226 226 }
227 227 tiprev = (int)index_length((indexObject *)op) - 1;
228 228 if (rev < -1 || rev > tiprev) {
229 229 PyErr_Format(PyExc_IndexError, "rev out of range: %d", rev);
230 230 return -1;
231 231 } else if (rev == -1) {
232 232 ps[0] = ps[1] = -1;
233 233 return 0;
234 234 } else {
235 235 return index_get_parents((indexObject *)op, rev, ps, tiprev);
236 236 }
237 237 }
238 238
239 239 static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
240 240 {
241 241 uint64_t offset;
242 242 if (rev == nullrev) {
243 243 return 0;
244 244 }
245 245 if (rev >= self->length) {
246 246 PyObject *tuple;
247 247 PyObject *pylong;
248 248 PY_LONG_LONG tmp;
249 249 tuple = PyList_GET_ITEM(self->added, rev - self->length);
250 250 pylong = PyTuple_GET_ITEM(tuple, 0);
251 251 tmp = PyLong_AsLongLong(pylong);
252 252 if (tmp == -1 && PyErr_Occurred()) {
253 253 return -1;
254 254 }
255 255 if (tmp < 0) {
256 256 PyErr_Format(PyExc_OverflowError,
257 257 "revlog entry size out of bound (%lld)",
258 258 (long long)tmp);
259 259 return -1;
260 260 }
261 261 offset = (uint64_t)tmp;
262 262 } else {
263 263 const char *data = index_deref(self, rev);
264 264 offset = getbe32(data + 4);
265 265 if (rev == 0) {
266 266 /* mask out version number for the first entry */
267 267 offset &= 0xFFFF;
268 268 } else {
269 269 uint32_t offset_high = getbe32(data);
270 270 offset |= ((uint64_t)offset_high) << 32;
271 271 }
272 272 }
273 273 return (int64_t)(offset >> 16);
274 274 }
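/*
 * Illustrative sketch (not part of the upstream file): the 8-byte
 * offset_flags field keeps the 2 flag bytes in its low 16 bits and the
 * 6-byte data-file offset in the upper 48 bits, which is why
 * index_get_start() above ends with `offset >> 16`. For revision 0 the
 * offset bytes are reused for the revlog version header (rev 0 always
 * starts at offset 0), so only the low 16 flag bits survive the 0xFFFF
 * mask.
 */
static inline int64_t example_offset_from_offset_flags(uint64_t offset_flags)
{
	return (int64_t)(offset_flags >> 16); /* byte offset in the data file */
}

static inline int example_flags_from_offset_flags(uint64_t offset_flags)
{
	return (int)(offset_flags & 0xFFFF); /* per-revision flag bits */
}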
275 275
276 276 static inline int index_get_length(indexObject *self, Py_ssize_t rev)
277 277 {
278 278 if (rev == nullrev) {
279 279 return 0;
280 280 }
281 281 if (rev >= self->length) {
282 282 PyObject *tuple;
283 283 PyObject *pylong;
284 284 long ret;
285 285 tuple = PyList_GET_ITEM(self->added, rev - self->length);
286 286 pylong = PyTuple_GET_ITEM(tuple, 1);
287 287 ret = PyInt_AsLong(pylong);
288 288 if (ret == -1 && PyErr_Occurred()) {
289 289 return -1;
290 290 }
291 291 if (ret < 0 || ret > (long)INT_MAX) {
292 292 PyErr_Format(PyExc_OverflowError,
293 293 "revlog entry size out of bound (%ld)",
294 294 ret);
295 295 return -1;
296 296 }
297 297 return (int)ret;
298 298 } else {
299 299 const char *data = index_deref(self, rev);
300 300 int tmp = (int)getbe32(data + 8);
301 301 if (tmp < 0) {
302 302 PyErr_Format(PyExc_OverflowError,
303 303 "revlog entry size out of bound (%d)",
304 304 tmp);
305 305 return -1;
306 306 }
307 307 return tmp;
308 308 }
309 309 }
310 310
311 311 /*
312 312 * RevlogNG format (all in big endian, data may be inlined):
313 313 * 6 bytes: offset
314 314 * 2 bytes: flags
315 315 * 4 bytes: compressed length
316 316 * 4 bytes: uncompressed length
317 317 * 4 bytes: base revision
318 318 * 4 bytes: link revision
319 319 * 4 bytes: parent 1 revision
320 320 * 4 bytes: parent 2 revision
321 321 * 32 bytes: nodeid (only 20 bytes used)
322 322 */
323 323 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
324 324 {
325 325 uint64_t offset_flags;
326 326 int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
327 327 const char *c_node_id;
328 328 const char *data;
329 329 Py_ssize_t length = index_length(self);
330 330 PyObject *entry;
331 331
332 332 if (pos == nullrev) {
333 333 Py_INCREF(nullentry);
334 334 return nullentry;
335 335 }
336 336
337 337 if (pos < 0 || pos >= length) {
338 338 PyErr_SetString(PyExc_IndexError, "revlog index out of range");
339 339 return NULL;
340 340 }
341 341
342 342 if (pos >= self->length) {
343 343 PyObject *obj;
344 344 obj = PyList_GET_ITEM(self->added, pos - self->length);
345 345 Py_INCREF(obj);
346 346 return obj;
347 347 }
348 348
349 349 if (self->cache) {
350 350 if (self->cache[pos]) {
351 351 Py_INCREF(self->cache[pos]);
352 352 return self->cache[pos];
353 353 }
354 354 } else {
355 355 self->cache = calloc(self->raw_length, sizeof(PyObject *));
356 356 if (self->cache == NULL)
357 357 return PyErr_NoMemory();
358 358 }
359 359
360 360 data = index_deref(self, pos);
361 361 if (data == NULL)
362 362 return NULL;
363 363
364 364 offset_flags = getbe32(data + 4);
365 365 if (pos == 0) /* mask out version number for the first entry */
366 366 offset_flags &= 0xFFFF;
367 367 else {
368 368 uint32_t offset_high = getbe32(data);
369 369 offset_flags |= ((uint64_t)offset_high) << 32;
370 370 }
371 371
372 372 comp_len = getbe32(data + 8);
373 373 uncomp_len = getbe32(data + 12);
374 374 base_rev = getbe32(data + 16);
375 375 link_rev = getbe32(data + 20);
376 376 parent_1 = getbe32(data + 24);
377 377 parent_2 = getbe32(data + 28);
378 378 c_node_id = data + 32;
379 379
380 380 entry = Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
381 381 base_rev, link_rev, parent_1, parent_2, c_node_id,
382 382 (Py_ssize_t)20);
383 383
384 384 if (entry) {
385 385 PyObject_GC_UnTrack(entry);
386 386 Py_INCREF(entry);
387 387 }
388 388
389 389 self->cache[pos] = entry;
390 390
391 391 return entry;
392 392 }
393 393
394 394 /*
395 395 * Return the 20-byte SHA of the node corresponding to the given rev.
396 396 */
397 397 static const char *index_node(indexObject *self, Py_ssize_t pos)
398 398 {
399 399 Py_ssize_t length = index_length(self);
400 400 const char *data;
401 401
402 402 if (pos == nullrev)
403 403 return nullid;
404 404
405 405 if (pos >= length)
406 406 return NULL;
407 407
408 408 if (pos >= self->length) {
409 409 PyObject *tuple, *str;
410 410 tuple = PyList_GET_ITEM(self->added, pos - self->length);
411 411 str = PyTuple_GetItem(tuple, 7);
412 412 return str ? PyBytes_AS_STRING(str) : NULL;
413 413 }
414 414
415 415 data = index_deref(self, pos);
416 416 return data ? data + 32 : NULL;
417 417 }
418 418
419 419 /*
420 420 * Return the 20-byte SHA of the node corresponding to the given rev. The
421 421 * rev is assumed to be existing. If not, an exception is set.
422 422 */
423 423 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
424 424 {
425 425 const char *node = index_node(self, pos);
426 426 if (node == NULL) {
427 427 PyErr_Format(PyExc_IndexError, "could not access rev %d",
428 428 (int)pos);
429 429 }
430 430 return node;
431 431 }
432 432
433 433 static int nt_insert(nodetree *self, const char *node, int rev);
434 434
435 435 static int node_check(PyObject *obj, char **node)
436 436 {
437 437 Py_ssize_t nodelen;
438 438 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
439 439 return -1;
440 440 if (nodelen == 20)
441 441 return 0;
442 442 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
443 443 return -1;
444 444 }
445 445
446 446 static PyObject *index_append(indexObject *self, PyObject *obj)
447 447 {
448 448 char *node;
449 449 Py_ssize_t len;
450 450
451 451 if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
452 452 PyErr_SetString(PyExc_TypeError, "8-tuple required");
453 453 return NULL;
454 454 }
455 455
456 456 if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
457 457 return NULL;
458 458
459 459 len = index_length(self);
460 460
461 461 if (self->added == NULL) {
462 462 self->added = PyList_New(0);
463 463 if (self->added == NULL)
464 464 return NULL;
465 465 }
466 466
467 467 if (PyList_Append(self->added, obj) == -1)
468 468 return NULL;
469 469
470 470 if (self->ntinitialized)
471 471 nt_insert(&self->nt, node, (int)len);
472 472
473 473 Py_CLEAR(self->headrevs);
474 474 Py_RETURN_NONE;
475 475 }
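/*
 * Minimal sketch (not part of the upstream file) of building an entry that
 * index_append() above accepts: an 8-tuple whose last item is a 20-byte
 * binary node id, with the same field order as the tuples produced by
 * index_get(). The helper name and error handling are illustrative only.
 */
static int example_append_entry(indexObject *self, uint64_t offset_flags,
                                int comp_len, int uncomp_len, int base_rev,
                                int link_rev, int parent_1, int parent_2,
                                const char *node /* 20 bytes */)
{
	PyObject *ret;
	PyObject *entry =
	    Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
	                  base_rev, link_rev, parent_1, parent_2, node,
	                  (Py_ssize_t)20);
	if (entry == NULL)
		return -1;
	ret = index_append(self, entry);
	Py_DECREF(entry);
	if (ret == NULL)
		return -1;
	Py_DECREF(ret); /* index_append returns Py_None on success */
	return 0;
}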
476 476
477 477 static PyObject *index_stats(indexObject *self)
478 478 {
479 479 PyObject *obj = PyDict_New();
480 480 PyObject *s = NULL;
481 481 PyObject *t = NULL;
482 482
483 483 if (obj == NULL)
484 484 return NULL;
485 485
486 486 #define istat(__n, __d) \
487 487 do { \
488 488 s = PyBytes_FromString(__d); \
489 489 t = PyInt_FromSsize_t(self->__n); \
490 490 if (!s || !t) \
491 491 goto bail; \
492 492 if (PyDict_SetItem(obj, s, t) == -1) \
493 493 goto bail; \
494 494 Py_CLEAR(s); \
495 495 Py_CLEAR(t); \
496 496 } while (0)
497 497
498 498 if (self->added) {
499 499 Py_ssize_t len = PyList_GET_SIZE(self->added);
500 500 s = PyBytes_FromString("index entries added");
501 501 t = PyInt_FromSsize_t(len);
502 502 if (!s || !t)
503 503 goto bail;
504 504 if (PyDict_SetItem(obj, s, t) == -1)
505 505 goto bail;
506 506 Py_CLEAR(s);
507 507 Py_CLEAR(t);
508 508 }
509 509
510 510 if (self->raw_length != self->length)
511 511 istat(raw_length, "revs on disk");
512 512 istat(length, "revs in memory");
513 513 istat(ntlookups, "node trie lookups");
514 514 istat(ntmisses, "node trie misses");
515 515 istat(ntrev, "node trie last rev scanned");
516 516 if (self->ntinitialized) {
517 517 istat(nt.capacity, "node trie capacity");
518 518 istat(nt.depth, "node trie depth");
519 519 istat(nt.length, "node trie count");
520 520 istat(nt.splits, "node trie splits");
521 521 }
522 522
523 523 #undef istat
524 524
525 525 return obj;
526 526
527 527 bail:
528 528 Py_XDECREF(obj);
529 529 Py_XDECREF(s);
530 530 Py_XDECREF(t);
531 531 return NULL;
532 532 }
533 533
534 534 /*
535 535 * When we cache a list, we want to be sure the caller can't mutate
536 536 * the cached copy.
537 537 */
538 538 static PyObject *list_copy(PyObject *list)
539 539 {
540 540 Py_ssize_t len = PyList_GET_SIZE(list);
541 541 PyObject *newlist = PyList_New(len);
542 542 Py_ssize_t i;
543 543
544 544 if (newlist == NULL)
545 545 return NULL;
546 546
547 547 for (i = 0; i < len; i++) {
548 548 PyObject *obj = PyList_GET_ITEM(list, i);
549 549 Py_INCREF(obj);
550 550 PyList_SET_ITEM(newlist, i, obj);
551 551 }
552 552
553 553 return newlist;
554 554 }
555 555
556 556 static int check_filter(PyObject *filter, Py_ssize_t arg)
557 557 {
558 558 if (filter) {
559 559 PyObject *arglist, *result;
560 560 int isfiltered;
561 561
562 562 arglist = Py_BuildValue("(n)", arg);
563 563 if (!arglist) {
564 564 return -1;
565 565 }
566 566
567 567 result = PyEval_CallObject(filter, arglist);
568 568 Py_DECREF(arglist);
569 569 if (!result) {
570 570 return -1;
571 571 }
572 572
573 573 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
574 574 * same as this function, so we can just return it directly.*/
575 575 isfiltered = PyObject_IsTrue(result);
576 576 Py_DECREF(result);
577 577 return isfiltered;
578 578 } else {
579 579 return 0;
580 580 }
581 581 }
582 582
583 583 static inline void set_phase_from_parents(char *phases, int parent_1,
584 584 int parent_2, Py_ssize_t i)
585 585 {
586 586 if (parent_1 >= 0 && phases[parent_1] > phases[i])
587 587 phases[i] = phases[parent_1];
588 588 if (parent_2 >= 0 && phases[parent_2] > phases[i])
589 589 phases[i] = phases[parent_2];
590 590 }
591 591
592 592 static PyObject *reachableroots2(indexObject *self, PyObject *args)
593 593 {
594 594
595 595 /* Input */
596 596 long minroot;
597 597 PyObject *includepatharg = NULL;
598 598 int includepath = 0;
599 599 /* heads and roots are lists */
600 600 PyObject *heads = NULL;
601 601 PyObject *roots = NULL;
602 602 PyObject *reachable = NULL;
603 603
604 604 PyObject *val;
605 605 Py_ssize_t len = index_length(self);
606 606 long revnum;
607 607 Py_ssize_t k;
608 608 Py_ssize_t i;
609 609 Py_ssize_t l;
610 610 int r;
611 611 int parents[2];
612 612
613 613 /* Internal data structure:
614 614 * tovisit: array of length len+1 (all revs + nullrev), filled up to
615 615 * lentovisit
616 616 *
617 617 * revstates: array of length len+1 (all revs + nullrev) */
618 618 int *tovisit = NULL;
619 619 long lentovisit = 0;
620 620 enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
621 621 char *revstates = NULL;
622 622
623 623 /* Get arguments */
624 624 if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
625 625 &PyList_Type, &roots, &PyBool_Type,
626 626 &includepatharg))
627 627 goto bail;
628 628
629 629 if (includepatharg == Py_True)
630 630 includepath = 1;
631 631
632 632 /* Initialize return set */
633 633 reachable = PyList_New(0);
634 634 if (reachable == NULL)
635 635 goto bail;
636 636
637 637 /* Initialize internal datastructures */
638 638 tovisit = (int *)malloc((len + 1) * sizeof(int));
639 639 if (tovisit == NULL) {
640 640 PyErr_NoMemory();
641 641 goto bail;
642 642 }
643 643
644 644 revstates = (char *)calloc(len + 1, 1);
645 645 if (revstates == NULL) {
646 646 PyErr_NoMemory();
647 647 goto bail;
648 648 }
649 649
650 650 l = PyList_GET_SIZE(roots);
651 651 for (i = 0; i < l; i++) {
652 652 revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
653 653 if (revnum == -1 && PyErr_Occurred())
654 654 goto bail;
655 655 /* If root is out of range, e.g. wdir(), it must be unreachable
656 656 * from heads. So we can just ignore it. */
657 657 if (revnum + 1 < 0 || revnum + 1 >= len + 1)
658 658 continue;
659 659 revstates[revnum + 1] |= RS_ROOT;
660 660 }
661 661
662 662 /* Populate tovisit with all the heads */
663 663 l = PyList_GET_SIZE(heads);
664 664 for (i = 0; i < l; i++) {
665 665 revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
666 666 if (revnum == -1 && PyErr_Occurred())
667 667 goto bail;
668 668 if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
669 669 PyErr_SetString(PyExc_IndexError, "head out of range");
670 670 goto bail;
671 671 }
672 672 if (!(revstates[revnum + 1] & RS_SEEN)) {
673 673 tovisit[lentovisit++] = (int)revnum;
674 674 revstates[revnum + 1] |= RS_SEEN;
675 675 }
676 676 }
677 677
678 678 /* Visit the tovisit list and find the reachable roots */
679 679 k = 0;
680 680 while (k < lentovisit) {
681 681 /* Add the node to reachable if it is a root*/
682 682 revnum = tovisit[k++];
683 683 if (revstates[revnum + 1] & RS_ROOT) {
684 684 revstates[revnum + 1] |= RS_REACHABLE;
685 685 val = PyInt_FromLong(revnum);
686 686 if (val == NULL)
687 687 goto bail;
688 688 r = PyList_Append(reachable, val);
689 689 Py_DECREF(val);
690 690 if (r < 0)
691 691 goto bail;
692 692 if (includepath == 0)
693 693 continue;
694 694 }
695 695
696 696 /* Add its parents to the list of nodes to visit */
697 697 if (revnum == nullrev)
698 698 continue;
699 699 r = index_get_parents(self, revnum, parents, (int)len - 1);
700 700 if (r < 0)
701 701 goto bail;
702 702 for (i = 0; i < 2; i++) {
703 703 if (!(revstates[parents[i] + 1] & RS_SEEN) &&
704 704 parents[i] >= minroot) {
705 705 tovisit[lentovisit++] = parents[i];
706 706 revstates[parents[i] + 1] |= RS_SEEN;
707 707 }
708 708 }
709 709 }
710 710
711 711 /* Find all the nodes in between the roots we found and the heads
712 712 * and add them to the reachable set */
713 713 if (includepath == 1) {
714 714 long minidx = minroot;
715 715 if (minidx < 0)
716 716 minidx = 0;
717 717 for (i = minidx; i < len; i++) {
718 718 if (!(revstates[i + 1] & RS_SEEN))
719 719 continue;
720 720 r = index_get_parents(self, i, parents, (int)len - 1);
721 721 /* Corrupted index file, error is set from
722 722 * index_get_parents */
723 723 if (r < 0)
724 724 goto bail;
725 725 if (((revstates[parents[0] + 1] |
726 726 revstates[parents[1] + 1]) &
727 727 RS_REACHABLE) &&
728 728 !(revstates[i + 1] & RS_REACHABLE)) {
729 729 revstates[i + 1] |= RS_REACHABLE;
730 730 val = PyInt_FromSsize_t(i);
731 731 if (val == NULL)
732 732 goto bail;
733 733 r = PyList_Append(reachable, val);
734 734 Py_DECREF(val);
735 735 if (r < 0)
736 736 goto bail;
737 737 }
738 738 }
739 739 }
740 740
741 741 free(revstates);
742 742 free(tovisit);
743 743 return reachable;
744 744 bail:
745 745 Py_XDECREF(reachable);
746 746 free(revstates);
747 747 free(tovisit);
748 748 return NULL;
749 749 }
750 750
751 751 static int add_roots_get_min(indexObject *self, PyObject *roots, char *phases,
752 752 char phase)
753 753 {
754 754 Py_ssize_t len = index_length(self);
755 755 PyObject *item;
756 756 PyObject *iterator;
757 757 int rev, minrev = -1;
758 758 char *node;
759 759
760 760 if (!PySet_Check(roots))
761 761 return -2;
762 762 iterator = PyObject_GetIter(roots);
763 763 if (iterator == NULL)
764 764 return -2;
765 765 while ((item = PyIter_Next(iterator))) {
766 766 if (node_check(item, &node) == -1)
767 767 goto failed;
768 768 rev = index_find_node(self, node, 20);
769 769 /* null is implicitly public, so negative is invalid */
770 770 if (rev < 0 || rev >= len)
771 771 goto failed;
772 772 phases[rev] = phase;
773 773 if (minrev == -1 || minrev > rev)
774 774 minrev = rev;
775 775 Py_DECREF(item);
776 776 }
777 777 Py_DECREF(iterator);
778 778 return minrev;
779 779 failed:
780 780 Py_DECREF(iterator);
781 781 Py_DECREF(item);
782 782 return -2;
783 783 }
784 784
785 785 static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
786 786 {
787 787 /* 0: public (untracked), 1: draft, 2: secret, 32: archive,
788 788 96: internal */
789 789 static const char trackedphases[] = {1, 2, 32, 96};
790 790 PyObject *ret = NULL;
791 791 PyObject *roots = Py_None;
792 792 PyObject *pyphase = NULL;
793 793 PyObject *pyrev = NULL;
794 794 PyObject *phaseroots = NULL;
795 795 PyObject *phasessize = NULL;
796 796 PyObject *phasesets[4] = {NULL, NULL, NULL, NULL};
797 797 Py_ssize_t len = index_length(self);
798 798 char *phases = NULL;
799 799 int minphaserev = -1, rev, i;
800 800 const int numphases = (int)(sizeof(phasesets) / sizeof(phasesets[0]));
801 801
802 802 if (!PyArg_ParseTuple(args, "O", &roots))
803 803 return NULL;
804 804 if (roots == NULL || !PyDict_Check(roots)) {
805 805 PyErr_SetString(PyExc_TypeError, "roots must be a dictionary");
806 806 return NULL;
807 807 }
808 808
809 809 phases = calloc(len, 1);
810 810 if (phases == NULL) {
811 811 PyErr_NoMemory();
812 812 return NULL;
813 813 }
814 814
815 815 for (i = 0; i < numphases; ++i) {
816 816 pyphase = PyInt_FromLong(trackedphases[i]);
817 817 if (pyphase == NULL)
818 818 goto release;
819 819 phaseroots = PyDict_GetItem(roots, pyphase);
820 820 Py_DECREF(pyphase);
821 821 if (phaseroots == NULL)
822 822 continue;
823 823 rev = add_roots_get_min(self, phaseroots, phases, trackedphases[i]);
824 824 phaseroots = NULL;
825 825 if (rev == -2)
826 826 goto release;
827 827 if (rev != -1 && (minphaserev == -1 || rev < minphaserev))
828 828 minphaserev = rev;
829 829 }
830 830
831 831 for (i = 0; i < numphases; ++i) {
832 832 phasesets[i] = PySet_New(NULL);
833 833 if (phasesets[i] == NULL)
834 834 goto release;
835 835 }
836 836
837 837 if (minphaserev == -1)
838 838 minphaserev = len;
839 839 for (rev = minphaserev; rev < len; ++rev) {
840 840 int parents[2];
841 841 /*
842 842 * The parent lookup could be skipped for phaseroots, but
843 843 * phase --force would historically not recompute them
844 844 * correctly, leaving descendants with a lower phase around.
845 845 * As such, unconditionally recompute the phase.
846 846 */
847 847 if (index_get_parents(self, rev, parents, (int)len - 1) < 0)
848 848 goto release;
849 849 set_phase_from_parents(phases, parents[0], parents[1], rev);
850 850 switch (phases[rev]) {
851 851 case 0:
852 852 continue;
853 853 case 1:
854 854 pyphase = phasesets[0];
855 855 break;
856 856 case 2:
857 857 pyphase = phasesets[1];
858 858 break;
859 859 case 32:
860 860 pyphase = phasesets[2];
861 861 break;
862 862 case 96:
863 863 pyphase = phasesets[3];
864 864 break;
865 865 default:
866 866 goto release;
867 867 }
868 868 pyrev = PyInt_FromLong(rev);
869 869 if (pyrev == NULL)
870 870 goto release;
871 871 if (PySet_Add(pyphase, pyrev) == -1) {
872 872 Py_DECREF(pyrev);
873 873 goto release;
874 874 }
875 875 Py_DECREF(pyrev);
876 876 }
877 877 phaseroots = _dict_new_presized(numphases);
878 878 if (phaseroots == NULL)
879 879 goto release;
880 880 for (i = 0; i < numphases; ++i) {
881 881 pyphase = PyInt_FromLong(trackedphases[i]);
882 882 if (pyphase == NULL)
883 883 goto release;
884 884 if (PyDict_SetItem(phaseroots, pyphase, phasesets[i]) == -1) {
885 885 Py_DECREF(pyphase);
886 886 goto release;
887 887 }
888 888 Py_DECREF(phasesets[i]);
889 889 phasesets[i] = NULL;
890 890 }
891 891 phasessize = PyInt_FromSsize_t(len);
892 892 if (phasessize == NULL)
893 893 goto release;
894 894
895 895 ret = PyTuple_Pack(2, phasessize, phaseroots);
896 896 Py_DECREF(phasessize);
897 897 Py_DECREF(phaseroots);
898 898 return ret;
899 899
900 900 release:
901 901 for (i = 0; i < numphases; ++i)
902 902 Py_XDECREF(phasesets[i]);
903 903 Py_XDECREF(phaseroots);
904 904
905 905 free(phases);
906 906 return NULL;
907 907 }
908 908
909 909 static PyObject *index_headrevs(indexObject *self, PyObject *args)
910 910 {
911 911 Py_ssize_t i, j, len;
912 912 char *nothead = NULL;
913 913 PyObject *heads = NULL;
914 914 PyObject *filter = NULL;
915 915 PyObject *filteredrevs = Py_None;
916 916
917 917 if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
918 918 return NULL;
919 919 }
920 920
921 921 if (self->headrevs && filteredrevs == self->filteredrevs)
922 922 return list_copy(self->headrevs);
923 923
924 924 Py_DECREF(self->filteredrevs);
925 925 self->filteredrevs = filteredrevs;
926 926 Py_INCREF(filteredrevs);
927 927
928 928 if (filteredrevs != Py_None) {
929 929 filter = PyObject_GetAttrString(filteredrevs, "__contains__");
930 930 if (!filter) {
931 931 PyErr_SetString(
932 932 PyExc_TypeError,
933 933 "filteredrevs has no attribute __contains__");
934 934 goto bail;
935 935 }
936 936 }
937 937
938 938 len = index_length(self);
939 939 heads = PyList_New(0);
940 940 if (heads == NULL)
941 941 goto bail;
942 942 if (len == 0) {
943 943 PyObject *nullid = PyInt_FromLong(-1);
944 944 if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
945 945 Py_XDECREF(nullid);
946 946 goto bail;
947 947 }
948 948 goto done;
949 949 }
950 950
951 951 nothead = calloc(len, 1);
952 952 if (nothead == NULL) {
953 953 PyErr_NoMemory();
954 954 goto bail;
955 955 }
956 956
957 957 for (i = len - 1; i >= 0; i--) {
958 958 int isfiltered;
959 959 int parents[2];
960 960
961 961 /* If nothead[i] == 1, it means we've seen an unfiltered child
962 962 * of this node already, and therefore this node is not
963 963 * filtered. So we can skip the expensive check_filter step.
964 964 */
965 965 if (nothead[i] != 1) {
966 966 isfiltered = check_filter(filter, i);
967 967 if (isfiltered == -1) {
968 968 PyErr_SetString(PyExc_TypeError,
969 969 "unable to check filter");
970 970 goto bail;
971 971 }
972 972
973 973 if (isfiltered) {
974 974 nothead[i] = 1;
975 975 continue;
976 976 }
977 977 }
978 978
979 979 if (index_get_parents(self, i, parents, (int)len - 1) < 0)
980 980 goto bail;
981 981 for (j = 0; j < 2; j++) {
982 982 if (parents[j] >= 0)
983 983 nothead[parents[j]] = 1;
984 984 }
985 985 }
986 986
987 987 for (i = 0; i < len; i++) {
988 988 PyObject *head;
989 989
990 990 if (nothead[i])
991 991 continue;
992 992 head = PyInt_FromSsize_t(i);
993 993 if (head == NULL || PyList_Append(heads, head) == -1) {
994 994 Py_XDECREF(head);
995 995 goto bail;
996 996 }
997 997 }
998 998
999 999 done:
1000 1000 self->headrevs = heads;
1001 1001 Py_XDECREF(filter);
1002 1002 free(nothead);
1003 1003 return list_copy(self->headrevs);
1004 1004 bail:
1005 1005 Py_XDECREF(filter);
1006 1006 Py_XDECREF(heads);
1007 1007 free(nothead);
1008 1008 return NULL;
1009 1009 }
1010 1010
1011 1011 /**
1012 1012 * Obtain the base revision index entry.
1013 1013 *
1014 1014 * Callers must ensure that rev >= 0 or illegal memory access may occur.
1015 1015 */
1016 1016 static inline int index_baserev(indexObject *self, int rev)
1017 1017 {
1018 1018 const char *data;
1019 1019 int result;
1020 1020
1021 1021 if (rev >= self->length) {
1022 1022 PyObject *tuple =
1023 1023 PyList_GET_ITEM(self->added, rev - self->length);
1024 1024 long ret;
1025 1025 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 3), &ret)) {
1026 1026 return -2;
1027 1027 }
1028 1028 result = (int)ret;
1029 1029 } else {
1030 1030 data = index_deref(self, rev);
1031 1031 if (data == NULL) {
1032 1032 return -2;
1033 1033 }
1034 1034
1035 1035 result = getbe32(data + 16);
1036 1036 }
1037 1037 if (result > rev) {
1038 1038 PyErr_Format(
1039 1039 PyExc_ValueError,
1040 1040 "corrupted revlog, revision base above revision: %d, %d",
1041 1041 rev, result);
1042 1042 return -2;
1043 1043 }
1044 1044 if (result < -1) {
1045 1045 PyErr_Format(
1046 1046 PyExc_ValueError,
1047 1047 "corrupted revlog, revision base out of range: %d, %d", rev,
1048 1048 result);
1049 1049 return -2;
1050 1050 }
1051 1051 return result;
1052 1052 }
1053 1053
1054 1054 /**
1055 1055 * Find if a revision is a snapshot or not
1056 1056 *
1057 1057 * Only relevant for sparse-revlog case.
1058 1058 * Callers must ensure that rev is in a valid range.
1059 1059 */
1060 1060 static int index_issnapshotrev(indexObject *self, Py_ssize_t rev)
1061 1061 {
1062 1062 int ps[2];
1063 1063 Py_ssize_t base;
1064 1064 while (rev >= 0) {
1065 1065 base = (Py_ssize_t)index_baserev(self, rev);
1066 1066 if (base == rev) {
1067 1067 base = -1;
1068 1068 }
1069 1069 if (base == -2) {
1070 1070 assert(PyErr_Occurred());
1071 1071 return -1;
1072 1072 }
1073 1073 if (base == -1) {
1074 1074 return 1;
1075 1075 }
1076 1076 if (index_get_parents(self, rev, ps, (int)rev) < 0) {
1077 1077 assert(PyErr_Occurred());
1078 1078 return -1;
1079 1079 };
1080 1080 if (base == ps[0] || base == ps[1]) {
1081 1081 return 0;
1082 1082 }
1083 1083 rev = base;
1084 1084 }
1085 1085 return rev == -1;
1086 1086 }
1087 1087
1088 1088 static PyObject *index_issnapshot(indexObject *self, PyObject *value)
1089 1089 {
1090 1090 long rev;
1091 1091 int issnap;
1092 1092 Py_ssize_t length = index_length(self);
1093 1093
1094 1094 if (!pylong_to_long(value, &rev)) {
1095 1095 return NULL;
1096 1096 }
1097 1097 if (rev < -1 || rev >= length) {
1098 1098 PyErr_Format(PyExc_ValueError, "revlog index out of range: %ld",
1099 1099 rev);
1100 1100 return NULL;
1101 1101 };
1102 1102 issnap = index_issnapshotrev(self, (Py_ssize_t)rev);
1103 1103 if (issnap < 0) {
1104 1104 return NULL;
1105 1105 };
1106 1106 return PyBool_FromLong((long)issnap);
1107 1107 }
1108 1108
1109 1109 static PyObject *index_findsnapshots(indexObject *self, PyObject *args)
1110 1110 {
1111 1111 Py_ssize_t start_rev;
1112 1112 PyObject *cache;
1113 1113 Py_ssize_t base;
1114 1114 Py_ssize_t rev;
1115 1115 PyObject *key = NULL;
1116 1116 PyObject *value = NULL;
1117 1117 const Py_ssize_t length = index_length(self);
1118 1118 if (!PyArg_ParseTuple(args, "O!n", &PyDict_Type, &cache, &start_rev)) {
1119 1119 return NULL;
1120 1120 }
1121 1121 for (rev = start_rev; rev < length; rev++) {
1122 1122 int issnap;
1123 1123 PyObject *allvalues = NULL;
1124 1124 issnap = index_issnapshotrev(self, rev);
1125 1125 if (issnap < 0) {
1126 1126 goto bail;
1127 1127 }
1128 1128 if (issnap == 0) {
1129 1129 continue;
1130 1130 }
1131 1131 base = (Py_ssize_t)index_baserev(self, rev);
1132 1132 if (base == rev) {
1133 1133 base = -1;
1134 1134 }
1135 1135 if (base == -2) {
1136 1136 assert(PyErr_Occurred());
1137 1137 goto bail;
1138 1138 }
1139 1139 key = PyInt_FromSsize_t(base);
1140 1140 allvalues = PyDict_GetItem(cache, key);
1141 1141 if (allvalues == NULL && PyErr_Occurred()) {
1142 1142 goto bail;
1143 1143 }
1144 1144 if (allvalues == NULL) {
1145 1145 int r;
1146 1146 allvalues = PyList_New(0);
1147 1147 if (!allvalues) {
1148 1148 goto bail;
1149 1149 }
1150 1150 r = PyDict_SetItem(cache, key, allvalues);
1151 1151 Py_DECREF(allvalues);
1152 1152 if (r < 0) {
1153 1153 goto bail;
1154 1154 }
1155 1155 }
1156 1156 value = PyInt_FromSsize_t(rev);
1157 1157 if (PyList_Append(allvalues, value)) {
1158 1158 goto bail;
1159 1159 }
1160 1160 Py_CLEAR(key);
1161 1161 Py_CLEAR(value);
1162 1162 }
1163 1163 Py_RETURN_NONE;
1164 1164 bail:
1165 1165 Py_XDECREF(key);
1166 1166 Py_XDECREF(value);
1167 1167 return NULL;
1168 1168 }
1169 1169
1170 1170 static PyObject *index_deltachain(indexObject *self, PyObject *args)
1171 1171 {
1172 1172 int rev, generaldelta;
1173 1173 PyObject *stoparg;
1174 1174 int stoprev, iterrev, baserev = -1;
1175 1175 int stopped;
1176 1176 PyObject *chain = NULL, *result = NULL;
1177 1177 const Py_ssize_t length = index_length(self);
1178 1178
1179 1179 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
1180 1180 return NULL;
1181 1181 }
1182 1182
1183 1183 if (PyInt_Check(stoparg)) {
1184 1184 stoprev = (int)PyInt_AsLong(stoparg);
1185 1185 if (stoprev == -1 && PyErr_Occurred()) {
1186 1186 return NULL;
1187 1187 }
1188 1188 } else if (stoparg == Py_None) {
1189 1189 stoprev = -2;
1190 1190 } else {
1191 1191 PyErr_SetString(PyExc_ValueError,
1192 1192 "stoprev must be integer or None");
1193 1193 return NULL;
1194 1194 }
1195 1195
1196 1196 if (rev < 0 || rev >= length) {
1197 1197 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1198 1198 return NULL;
1199 1199 }
1200 1200
1201 1201 chain = PyList_New(0);
1202 1202 if (chain == NULL) {
1203 1203 return NULL;
1204 1204 }
1205 1205
1206 1206 baserev = index_baserev(self, rev);
1207 1207
1208 1208 /* This should never happen. */
1209 1209 if (baserev <= -2) {
1210 1210 /* Error should be set by index_deref() */
1211 1211 assert(PyErr_Occurred());
1212 1212 goto bail;
1213 1213 }
1214 1214
1215 1215 iterrev = rev;
1216 1216
1217 1217 while (iterrev != baserev && iterrev != stoprev) {
1218 1218 PyObject *value = PyInt_FromLong(iterrev);
1219 1219 if (value == NULL) {
1220 1220 goto bail;
1221 1221 }
1222 1222 if (PyList_Append(chain, value)) {
1223 1223 Py_DECREF(value);
1224 1224 goto bail;
1225 1225 }
1226 1226 Py_DECREF(value);
1227 1227
1228 1228 if (generaldelta) {
1229 1229 iterrev = baserev;
1230 1230 } else {
1231 1231 iterrev--;
1232 1232 }
1233 1233
1234 1234 if (iterrev < 0) {
1235 1235 break;
1236 1236 }
1237 1237
1238 1238 if (iterrev >= length) {
1239 1239 PyErr_SetString(PyExc_IndexError,
1240 1240 "revision outside index");
1241 1241 return NULL;
1242 1242 }
1243 1243
1244 1244 baserev = index_baserev(self, iterrev);
1245 1245
1246 1246 /* This should never happen. */
1247 1247 if (baserev <= -2) {
1248 1248 /* Error should be set by index_deref() */
1249 1249 assert(PyErr_Occurred());
1250 1250 goto bail;
1251 1251 }
1252 1252 }
1253 1253
1254 1254 if (iterrev == stoprev) {
1255 1255 stopped = 1;
1256 1256 } else {
1257 1257 PyObject *value = PyInt_FromLong(iterrev);
1258 1258 if (value == NULL) {
1259 1259 goto bail;
1260 1260 }
1261 1261 if (PyList_Append(chain, value)) {
1262 1262 Py_DECREF(value);
1263 1263 goto bail;
1264 1264 }
1265 1265 Py_DECREF(value);
1266 1266
1267 1267 stopped = 0;
1268 1268 }
1269 1269
1270 1270 if (PyList_Reverse(chain)) {
1271 1271 goto bail;
1272 1272 }
1273 1273
1274 1274 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
1275 1275 Py_DECREF(chain);
1276 1276 return result;
1277 1277
1278 1278 bail:
1279 1279 Py_DECREF(chain);
1280 1280 return NULL;
1281 1281 }
1282 1282
1283 1283 static inline int64_t
1284 1284 index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
1285 1285 {
1286 1286 int64_t start_offset;
1287 1287 int64_t end_offset;
1288 1288 int end_size;
1289 1289 start_offset = index_get_start(self, start_rev);
1290 1290 if (start_offset < 0) {
1291 1291 return -1;
1292 1292 }
1293 1293 end_offset = index_get_start(self, end_rev);
1294 1294 if (end_offset < 0) {
1295 1295 return -1;
1296 1296 }
1297 1297 end_size = index_get_length(self, end_rev);
1298 1298 if (end_size < 0) {
1299 1299 return -1;
1300 1300 }
1301 1301 if (end_offset < start_offset) {
1302 1302 PyErr_Format(PyExc_ValueError,
1303 1303 "corrupted revlog index: inconsistent offset "
1304 1304 "between revisions (%zd) and (%zd)",
1305 1305 start_rev, end_rev);
1306 1306 return -1;
1307 1307 }
1308 1308 return (end_offset - start_offset) + (int64_t)end_size;
1309 1309 }
1310 1310
1311 1311 /* returns endidx so that revs[startidx:endidx] has no empty trailing revs */
1312 1312 static Py_ssize_t trim_endidx(indexObject *self, const Py_ssize_t *revs,
1313 1313 Py_ssize_t startidx, Py_ssize_t endidx)
1314 1314 {
1315 1315 int length;
1316 1316 while (endidx > 1 && endidx > startidx) {
1317 1317 length = index_get_length(self, revs[endidx - 1]);
1318 1318 if (length < 0) {
1319 1319 return -1;
1320 1320 }
1321 1321 if (length != 0) {
1322 1322 break;
1323 1323 }
1324 1324 endidx -= 1;
1325 1325 }
1326 1326 return endidx;
1327 1327 }
1328 1328
1329 1329 struct Gap {
1330 1330 int64_t size;
1331 1331 Py_ssize_t idx;
1332 1332 };
1333 1333
1334 1334 static int gap_compare(const void *left, const void *right)
1335 1335 {
1336 1336 const struct Gap *l_left = ((const struct Gap *)left);
1337 1337 const struct Gap *l_right = ((const struct Gap *)right);
1338 1338 if (l_left->size < l_right->size) {
1339 1339 return -1;
1340 1340 } else if (l_left->size > l_right->size) {
1341 1341 return 1;
1342 1342 }
1343 1343 return 0;
1344 1344 }
1345 1345 static int Py_ssize_t_compare(const void *left, const void *right)
1346 1346 {
1347 1347 const Py_ssize_t l_left = *(const Py_ssize_t *)left;
1348 1348 const Py_ssize_t l_right = *(const Py_ssize_t *)right;
1349 1349 if (l_left < l_right) {
1350 1350 return -1;
1351 1351 } else if (l_left > l_right) {
1352 1352 return 1;
1353 1353 }
1354 1354 return 0;
1355 1355 }
1356 1356
1357 1357 static PyObject *index_slicechunktodensity(indexObject *self, PyObject *args)
1358 1358 {
1359 1359 /* method arguments */
1360 1360 PyObject *list_revs = NULL; /* revisions in the chain */
1361 1361 double targetdensity = 0; /* min density to achieve */
1362 1362 Py_ssize_t mingapsize = 0; /* threshold to ignore gaps */
1363 1363
1364 1364 /* other core variables */
1365 1365 Py_ssize_t idxlen = index_length(self);
1366 1366 Py_ssize_t i; /* used for various iteration */
1367 1367 PyObject *result = NULL; /* the final return of the function */
1368 1368
1369 1369 /* generic information about the delta chain being sliced */
1370 1370 Py_ssize_t num_revs = 0; /* size of the full delta chain */
1371 1371 Py_ssize_t *revs = NULL; /* native array of revision in the chain */
1372 1372 int64_t chainpayload = 0; /* sum of all delta in the chain */
1373 1373 int64_t deltachainspan = 0; /* distance from first byte to last byte */
1374 1374
1375 1375 /* variable used for slicing the delta chain */
1376 1376 int64_t readdata = 0; /* amount of data currently planned to be read */
1377 1377 double density = 0; /* ratio of payload data to the data actually read */
1378 1378 int64_t previous_end;
1379 1379 struct Gap *gaps = NULL; /* array of notable gaps in the chain */
1380 1380 Py_ssize_t num_gaps =
1381 1381 0; /* total number of notable gaps recorded so far */
1382 1382 Py_ssize_t *selected_indices = NULL; /* indices of gap skipped over */
1383 1383 Py_ssize_t num_selected = 0; /* number of gaps skipped */
1384 1384 PyObject *chunk = NULL; /* individual slice */
1385 1385 PyObject *allchunks = NULL; /* all slices */
1386 1386 Py_ssize_t previdx;
1387 1387
1388 1388 /* parsing argument */
1389 1389 if (!PyArg_ParseTuple(args, "O!dn", &PyList_Type, &list_revs,
1390 1390 &targetdensity, &mingapsize)) {
1391 1391 goto bail;
1392 1392 }
1393 1393
1394 1394 /* If the delta chain contains a single element, we do not need slicing
1395 1395 */
1396 1396 num_revs = PyList_GET_SIZE(list_revs);
1397 1397 if (num_revs <= 1) {
1398 1398 result = PyTuple_Pack(1, list_revs);
1399 1399 goto done;
1400 1400 }
1401 1401
1402 1402 /* Turn the python list into a native integer array (for efficiency) */
1403 1403 revs = (Py_ssize_t *)calloc(num_revs, sizeof(Py_ssize_t));
1404 1404 if (revs == NULL) {
1405 1405 PyErr_NoMemory();
1406 1406 goto bail;
1407 1407 }
1408 1408 for (i = 0; i < num_revs; i++) {
1409 1409 Py_ssize_t revnum = PyInt_AsLong(PyList_GET_ITEM(list_revs, i));
1410 1410 if (revnum == -1 && PyErr_Occurred()) {
1411 1411 goto bail;
1412 1412 }
1413 1413 if (revnum < nullrev || revnum >= idxlen) {
1414 1414 PyErr_Format(PyExc_IndexError,
1415 1415 "index out of range: %zd", revnum);
1416 1416 goto bail;
1417 1417 }
1418 1418 revs[i] = revnum;
1419 1419 }
1420 1420
1421 1421 /* Compute and check various properties of the unsliced delta chain */
1422 1422 deltachainspan = index_segment_span(self, revs[0], revs[num_revs - 1]);
1423 1423 if (deltachainspan < 0) {
1424 1424 goto bail;
1425 1425 }
1426 1426
1427 1427 if (deltachainspan <= mingapsize) {
1428 1428 result = PyTuple_Pack(1, list_revs);
1429 1429 goto done;
1430 1430 }
1431 1431 chainpayload = 0;
1432 1432 for (i = 0; i < num_revs; i++) {
1433 1433 int tmp = index_get_length(self, revs[i]);
1434 1434 if (tmp < 0) {
1435 1435 goto bail;
1436 1436 }
1437 1437 chainpayload += tmp;
1438 1438 }
1439 1439
1440 1440 readdata = deltachainspan;
1441 1441 density = 1.0;
1442 1442
1443 1443 if (0 < deltachainspan) {
1444 1444 density = (double)chainpayload / (double)deltachainspan;
1445 1445 }
1446 1446
1447 1447 if (density >= targetdensity) {
1448 1448 result = PyTuple_Pack(1, list_revs);
1449 1449 goto done;
1450 1450 }
1451 1451
1452 1452 /* if chain is too sparse, look for relevant gaps */
1453 1453 gaps = (struct Gap *)calloc(num_revs, sizeof(struct Gap));
1454 1454 if (gaps == NULL) {
1455 1455 PyErr_NoMemory();
1456 1456 goto bail;
1457 1457 }
1458 1458
1459 1459 previous_end = -1;
1460 1460 for (i = 0; i < num_revs; i++) {
1461 1461 int64_t revstart;
1462 1462 int revsize;
1463 1463 revstart = index_get_start(self, revs[i]);
1464 1464 if (revstart < 0) {
1465 1465 goto bail;
1466 1466 };
1467 1467 revsize = index_get_length(self, revs[i]);
1468 1468 if (revsize < 0) {
1469 1469 goto bail;
1470 1470 };
1471 1471 if (revsize == 0) {
1472 1472 continue;
1473 1473 }
1474 1474 if (previous_end >= 0) {
1475 1475 int64_t gapsize = revstart - previous_end;
1476 1476 if (gapsize > mingapsize) {
1477 1477 gaps[num_gaps].size = gapsize;
1478 1478 gaps[num_gaps].idx = i;
1479 1479 num_gaps += 1;
1480 1480 }
1481 1481 }
1482 1482 previous_end = revstart + revsize;
1483 1483 }
1484 1484 if (num_gaps == 0) {
1485 1485 result = PyTuple_Pack(1, list_revs);
1486 1486 goto done;
1487 1487 }
1488 1488 qsort(gaps, num_gaps, sizeof(struct Gap), &gap_compare);
1489 1489
1490 1490 /* Slice at the largest gaps first, as they improve the density the most */
1491 1491 selected_indices =
1492 1492 (Py_ssize_t *)malloc((num_gaps + 1) * sizeof(Py_ssize_t));
1493 1493 if (selected_indices == NULL) {
1494 1494 PyErr_NoMemory();
1495 1495 goto bail;
1496 1496 }
1497 1497
1498 1498 for (i = num_gaps - 1; i >= 0; i--) {
1499 1499 selected_indices[num_selected] = gaps[i].idx;
1500 1500 readdata -= gaps[i].size;
1501 1501 num_selected += 1;
1502 1502 if (readdata <= 0) {
1503 1503 density = 1.0;
1504 1504 } else {
1505 1505 density = (double)chainpayload / (double)readdata;
1506 1506 }
1507 1507 if (density >= targetdensity) {
1508 1508 break;
1509 1509 }
1510 1510 }
1511 1511 qsort(selected_indices, num_selected, sizeof(Py_ssize_t),
1512 1512 &Py_ssize_t_compare);
1513 1513
1514 1514 /* create the resulting slice */
1515 1515 allchunks = PyList_New(0);
1516 1516 if (allchunks == NULL) {
1517 1517 goto bail;
1518 1518 }
1519 1519 previdx = 0;
1520 1520 selected_indices[num_selected] = num_revs;
1521 1521 for (i = 0; i <= num_selected; i++) {
1522 1522 Py_ssize_t idx = selected_indices[i];
1523 1523 Py_ssize_t endidx = trim_endidx(self, revs, previdx, idx);
1524 1524 if (endidx < 0) {
1525 1525 goto bail;
1526 1526 }
1527 1527 if (previdx < endidx) {
1528 1528 chunk = PyList_GetSlice(list_revs, previdx, endidx);
1529 1529 if (chunk == NULL) {
1530 1530 goto bail;
1531 1531 }
1532 1532 if (PyList_Append(allchunks, chunk) == -1) {
1533 1533 goto bail;
1534 1534 }
1535 1535 Py_DECREF(chunk);
1536 1536 chunk = NULL;
1537 1537 }
1538 1538 previdx = idx;
1539 1539 }
1540 1540 result = allchunks;
1541 1541 goto done;
1542 1542
1543 1543 bail:
1544 1544 Py_XDECREF(allchunks);
1545 1545 Py_XDECREF(chunk);
1546 1546 done:
1547 1547 free(revs);
1548 1548 free(gaps);
1549 1549 free(selected_indices);
1550 1550 return result;
1551 1551 }
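/*
 * Toy illustration (not part of the upstream file) of the density rule used
 * by index_slicechunktodensity() above: a chain whose revisions hold
 * `chainpayload` bytes of delta data spread over `deltachainspan` bytes on
 * disk has density chainpayload / deltachainspan; skipping a gap of
 * `gapsize` bytes shrinks the data that must be read and so raises the
 * density, which is why the largest gaps are removed first.
 */
static double example_density_after_skipping_gap(int64_t chainpayload,
                                                 int64_t deltachainspan,
                                                 int64_t gapsize)
{
	int64_t readdata = deltachainspan - gapsize;
	if (readdata <= 0)
		return 1.0;
	return (double)chainpayload / (double)readdata;
}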
1552 1552
1553 1553 static inline int nt_level(const char *node, Py_ssize_t level)
1554 1554 {
1555 1555 int v = node[level >> 1];
1556 1556 if (!(level & 1))
1557 1557 v >>= 4;
1558 1558 return v & 0xf;
1559 1559 }
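/*
 * Example (not part of the upstream file): nt_level() walks a binary node
 * id one hex nybble at a time; even levels take the high nybble of a byte,
 * odd levels the low nybble. For the prefix {0x1a, 0x2b}, levels 0..3
 * therefore yield 0x1, 0xa, 0x2 and 0xb.
 */
static void example_nt_level_usage(void)
{
	static const char prefix[2] = {0x1a, 0x2b};
	assert(nt_level(prefix, 0) == 0x1);
	assert(nt_level(prefix, 1) == 0xa);
	assert(nt_level(prefix, 2) == 0x2);
	assert(nt_level(prefix, 3) == 0xb);
}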
1560 1560
1561 1561 /*
1562 1562 * Return values:
1563 1563 *
1564 1564 * -4: match is ambiguous (multiple candidates)
1565 1565 * -2: not found
1566 1566 * rest: valid rev
1567 1567 */
1568 1568 static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
1569 1569 int hex)
1570 1570 {
1571 1571 int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
1572 1572 int level, maxlevel, off;
1573 1573
1574 1574 if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
1575 1575 return -1;
1576 1576
1577 1577 if (hex)
1578 1578 maxlevel = nodelen > 40 ? 40 : (int)nodelen;
1579 1579 else
1580 1580 maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);
1581 1581
1582 1582 for (level = off = 0; level < maxlevel; level++) {
1583 1583 int k = getnybble(node, level);
1584 1584 nodetreenode *n = &self->nodes[off];
1585 1585 int v = n->children[k];
1586 1586
1587 1587 if (v < 0) {
1588 1588 const char *n;
1589 1589 Py_ssize_t i;
1590 1590
1591 1591 v = -(v + 2);
1592 1592 n = index_node(self->index, v);
1593 1593 if (n == NULL)
1594 1594 return -2;
1595 1595 for (i = level; i < maxlevel; i++)
1596 1596 if (getnybble(node, i) != nt_level(n, i))
1597 1597 return -2;
1598 1598 return v;
1599 1599 }
1600 1600 if (v == 0)
1601 1601 return -2;
1602 1602 off = v;
1603 1603 }
1604 1604 /* multiple matches against an ambiguous prefix */
1605 1605 return -4;
1606 1606 }
1607 1607
1608 1608 static int nt_new(nodetree *self)
1609 1609 {
1610 1610 if (self->length == self->capacity) {
1611 1611 unsigned newcapacity;
1612 1612 nodetreenode *newnodes;
1613 1613 newcapacity = self->capacity * 2;
1614 1614 if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
1615 1615 PyErr_SetString(PyExc_MemoryError,
1616 1616 "overflow in nt_new");
1617 1617 return -1;
1618 1618 }
1619 1619 newnodes =
1620 1620 realloc(self->nodes, newcapacity * sizeof(nodetreenode));
1621 1621 if (newnodes == NULL) {
1622 1622 PyErr_SetString(PyExc_MemoryError, "out of memory");
1623 1623 return -1;
1624 1624 }
1625 1625 self->capacity = newcapacity;
1626 1626 self->nodes = newnodes;
1627 1627 memset(&self->nodes[self->length], 0,
1628 1628 sizeof(nodetreenode) * (self->capacity - self->length));
1629 1629 }
1630 1630 return self->length++;
1631 1631 }
1632 1632
1633 1633 static int nt_insert(nodetree *self, const char *node, int rev)
1634 1634 {
1635 1635 int level = 0;
1636 1636 int off = 0;
1637 1637
1638 1638 while (level < 40) {
1639 1639 int k = nt_level(node, level);
1640 1640 nodetreenode *n;
1641 1641 int v;
1642 1642
1643 1643 n = &self->nodes[off];
1644 1644 v = n->children[k];
1645 1645
1646 1646 if (v == 0) {
1647 1647 n->children[k] = -rev - 2;
1648 1648 return 0;
1649 1649 }
1650 1650 if (v < 0) {
1651 1651 const char *oldnode =
1652 1652 index_node_existing(self->index, -(v + 2));
1653 1653 int noff;
1654 1654
1655 1655 if (oldnode == NULL)
1656 1656 return -1;
1657 1657 if (!memcmp(oldnode, node, 20)) {
1658 1658 n->children[k] = -rev - 2;
1659 1659 return 0;
1660 1660 }
1661 1661 noff = nt_new(self);
1662 1662 if (noff == -1)
1663 1663 return -1;
1664 1664 /* self->nodes may have been changed by realloc */
1665 1665 self->nodes[off].children[k] = noff;
1666 1666 off = noff;
1667 1667 n = &self->nodes[off];
1668 1668 n->children[nt_level(oldnode, ++level)] = v;
1669 1669 if (level > self->depth)
1670 1670 self->depth = level;
1671 1671 self->splits += 1;
1672 1672 } else {
1673 1673 level += 1;
1674 1674 off = v;
1675 1675 }
1676 1676 }
1677 1677
1678 1678 return -1;
1679 1679 }
1680 1680
1681 1681 static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
1682 1682 {
1683 1683 Py_ssize_t rev;
1684 1684 const char *node;
1685 1685 Py_ssize_t length;
1686 1686 if (!PyArg_ParseTuple(args, "n", &rev))
1687 1687 return NULL;
1688 1688 length = index_length(self->nt.index);
1689 1689 if (rev < 0 || rev >= length) {
1690 1690 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1691 1691 return NULL;
1692 1692 }
1693 1693 node = index_node_existing(self->nt.index, rev);
1694 1694 if (nt_insert(&self->nt, node, (int)rev) == -1)
1695 1695 return NULL;
1696 1696 Py_RETURN_NONE;
1697 1697 }
1698 1698
1699 1699 static int nt_delete_node(nodetree *self, const char *node)
1700 1700 {
1701 1701 /* rev==-2 happens to get encoded as 0, which is interpreted as not set
1702 1702 */
1703 1703 return nt_insert(self, node, -2);
1704 1704 }
1705 1705
1706 1706 static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
1707 1707 {
1708 1708 /* Initialize before overflow-checking to avoid nt_dealloc() crash. */
1709 1709 self->nodes = NULL;
1710 1710
1711 1711 self->index = index;
1712 1712 /* The input capacity is in terms of revisions, while the field is in
1713 1713 * terms of nodetree nodes. */
1714 1714 self->capacity = (capacity < 4 ? 4 : capacity / 2);
1715 1715 self->depth = 0;
1716 1716 self->splits = 0;
1717 1717 if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
1718 1718 PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
1719 1719 return -1;
1720 1720 }
1721 1721 self->nodes = calloc(self->capacity, sizeof(nodetreenode));
1722 1722 if (self->nodes == NULL) {
1723 1723 PyErr_NoMemory();
1724 1724 return -1;
1725 1725 }
1726 1726 self->length = 1;
1727 1727 return 0;
1728 1728 }
1729 1729
1730 1730 static int ntobj_init(nodetreeObject *self, PyObject *args)
1731 1731 {
1732 1732 PyObject *index;
1733 1733 unsigned capacity;
1734 1734 if (!PyArg_ParseTuple(args, "O!I", &HgRevlogIndex_Type, &index,
1735 1735 &capacity))
1736 1736 return -1;
1737 1737 Py_INCREF(index);
1738 1738 return nt_init(&self->nt, (indexObject *)index, capacity);
1739 1739 }
1740 1740
1741 1741 static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
1742 1742 {
1743 1743 return nt_find(self, node, nodelen, 1);
1744 1744 }
1745 1745
1746 1746 /*
1747 1747 * Find the length of the shortest unique prefix of node.
1748 1748 *
1749 1749 * Return values:
1750 1750 *
1751 1751 * -3: error (exception set)
1752 1752 * -2: not found (no exception set)
1753 1753 * rest: length of shortest prefix
1754 1754 */
1755 1755 static int nt_shortest(nodetree *self, const char *node)
1756 1756 {
1757 1757 int level, off;
1758 1758
1759 1759 for (level = off = 0; level < 40; level++) {
1760 1760 int k, v;
1761 1761 nodetreenode *n = &self->nodes[off];
1762 1762 k = nt_level(node, level);
1763 1763 v = n->children[k];
1764 1764 if (v < 0) {
1765 1765 const char *n;
1766 1766 v = -(v + 2);
1767 1767 n = index_node_existing(self->index, v);
1768 1768 if (n == NULL)
1769 1769 return -3;
1770 1770 if (memcmp(node, n, 20) != 0)
1771 1771 /*
1772 1772 * Found a unique prefix, but it wasn't for the
1773 1773 * requested node (i.e the requested node does
1774 1774 * not exist).
1775 1775 */
1776 1776 return -2;
1777 1777 return level + 1;
1778 1778 }
1779 1779 if (v == 0)
1780 1780 return -2;
1781 1781 off = v;
1782 1782 }
1783 1783 /*
1784 1784 * The node was still not unique after 40 hex digits, so this won't
1785 1785 * happen. Also, if we get here, then there's a programming error in
1786 1786 * this file that made us insert a node longer than 40 hex digits.
1787 1787 */
1788 1788 PyErr_SetString(PyExc_Exception, "broken node tree");
1789 1789 return -3;
1790 1790 }
1791 1791
1792 1792 static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
1793 1793 {
1794 1794 PyObject *val;
1795 1795 char *node;
1796 1796 int length;
1797 1797
1798 1798 if (!PyArg_ParseTuple(args, "O", &val))
1799 1799 return NULL;
1800 1800 if (node_check(val, &node) == -1)
1801 1801 return NULL;
1802 1802
1803 1803 length = nt_shortest(&self->nt, node);
1804 1804 if (length == -3)
1805 1805 return NULL;
1806 1806 if (length == -2) {
1807 1807 raise_revlog_error();
1808 1808 return NULL;
1809 1809 }
1810 1810 return PyInt_FromLong(length);
1811 1811 }
1812 1812
1813 1813 static void nt_dealloc(nodetree *self)
1814 1814 {
1815 1815 free(self->nodes);
1816 1816 self->nodes = NULL;
1817 1817 }
1818 1818
1819 1819 static void ntobj_dealloc(nodetreeObject *self)
1820 1820 {
1821 1821 Py_XDECREF(self->nt.index);
1822 1822 nt_dealloc(&self->nt);
1823 1823 PyObject_Del(self);
1824 1824 }
1825 1825
1826 1826 static PyMethodDef ntobj_methods[] = {
1827 1827 {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
1828 1828 "insert an index entry"},
1829 1829 {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
1830 1830 "find length of shortest hex nodeid of a binary ID"},
1831 1831 {NULL} /* Sentinel */
1832 1832 };
1833 1833
1834 1834 static PyTypeObject nodetreeType = {
1835 1835 PyVarObject_HEAD_INIT(NULL, 0) /* header */
1836 1836 "parsers.nodetree", /* tp_name */
1837 1837 sizeof(nodetreeObject), /* tp_basicsize */
1838 1838 0, /* tp_itemsize */
1839 1839 (destructor)ntobj_dealloc, /* tp_dealloc */
1840 1840 0, /* tp_print */
1841 1841 0, /* tp_getattr */
1842 1842 0, /* tp_setattr */
1843 1843 0, /* tp_compare */
1844 1844 0, /* tp_repr */
1845 1845 0, /* tp_as_number */
1846 1846 0, /* tp_as_sequence */
1847 1847 0, /* tp_as_mapping */
1848 1848 0, /* tp_hash */
1849 1849 0, /* tp_call */
1850 1850 0, /* tp_str */
1851 1851 0, /* tp_getattro */
1852 1852 0, /* tp_setattro */
1853 1853 0, /* tp_as_buffer */
1854 1854 Py_TPFLAGS_DEFAULT, /* tp_flags */
1855 1855 "nodetree", /* tp_doc */
1856 1856 0, /* tp_traverse */
1857 1857 0, /* tp_clear */
1858 1858 0, /* tp_richcompare */
1859 1859 0, /* tp_weaklistoffset */
1860 1860 0, /* tp_iter */
1861 1861 0, /* tp_iternext */
1862 1862 ntobj_methods, /* tp_methods */
1863 1863 0, /* tp_members */
1864 1864 0, /* tp_getset */
1865 1865 0, /* tp_base */
1866 1866 0, /* tp_dict */
1867 1867 0, /* tp_descr_get */
1868 1868 0, /* tp_descr_set */
1869 1869 0, /* tp_dictoffset */
1870 1870 (initproc)ntobj_init, /* tp_init */
1871 1871 0, /* tp_alloc */
1872 1872 };
1873 1873
1874 1874 static int index_init_nt(indexObject *self)
1875 1875 {
1876 1876 if (!self->ntinitialized) {
1877 1877 if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
1878 1878 nt_dealloc(&self->nt);
1879 1879 return -1;
1880 1880 }
1881 1881 if (nt_insert(&self->nt, nullid, -1) == -1) {
1882 1882 nt_dealloc(&self->nt);
1883 1883 return -1;
1884 1884 }
1885 1885 self->ntinitialized = 1;
1886 1886 self->ntrev = (int)index_length(self);
1887 1887 self->ntlookups = 1;
1888 1888 self->ntmisses = 0;
1889 1889 }
1890 1890 return 0;
1891 1891 }
1892 1892
1893 1893 /*
1894 1894 * Return values:
1895 1895 *
1896 1896 * -3: error (exception set)
1897 1897 * -2: not found (no exception set)
1898 1898 * rest: valid rev
1899 1899 */
1900 1900 static int index_find_node(indexObject *self, const char *node,
1901 1901 Py_ssize_t nodelen)
1902 1902 {
1903 1903 int rev;
1904 1904
1905 1905 if (index_init_nt(self) == -1)
1906 1906 return -3;
1907 1907
1908 1908 self->ntlookups++;
1909 1909 rev = nt_find(&self->nt, node, nodelen, 0);
1910 1910 if (rev >= -1)
1911 1911 return rev;
1912 1912
1913 1913 /*
1914 1914 * For the first handful of lookups, we scan the entire index,
1915 1915 * and cache only the matching nodes. This optimizes for cases
1916 1916 * like "hg tip", where only a few nodes are accessed.
1917 1917 *
1918 1918 * After that, we cache every node we visit, using a single
1919 1919 * scan amortized over multiple lookups. This gives the best
1920 1920 * bulk performance, e.g. for "hg log".
1921 1921 */
1922 1922 if (self->ntmisses++ < 4) {
1923 1923 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1924 1924 const char *n = index_node_existing(self, rev);
1925 1925 if (n == NULL)
1926 1926 return -3;
1927 1927 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1928 1928 if (nt_insert(&self->nt, n, rev) == -1)
1929 1929 return -3;
1930 1930 break;
1931 1931 }
1932 1932 }
1933 1933 } else {
1934 1934 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1935 1935 const char *n = index_node_existing(self, rev);
1936 1936 if (n == NULL)
1937 1937 return -3;
1938 1938 if (nt_insert(&self->nt, n, rev) == -1) {
1939 1939 self->ntrev = rev + 1;
1940 1940 return -3;
1941 1941 }
1942 1942 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1943 1943 break;
1944 1944 }
1945 1945 }
1946 1946 self->ntrev = rev;
1947 1947 }
1948 1948
1949 1949 if (rev >= 0)
1950 1950 return rev;
1951 1951 return -2;
1952 1952 }
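
The comment inside index_find_node() describes a two-phase policy: the first few misses scan the index backwards and insert only the node that was asked for, while later misses insert every node they pass, so one long scan is amortized over many future lookups. Below is a standalone sketch of that policy over toy string keys (hypothetical names; every call here is treated as a trie miss, and "caching" is just a flag per entry).

#include <stdio.h>
#include <string.h>

#define N 8

static const char *keys[N] = {"a", "b", "c", "d", "e", "f", "g", "h"};
static int cached[N]; /* 1 once keys[i] has been inserted into the "trie" */
static int ntrev = N; /* lowest position not yet swept by a full scan */
static int misses;

static int find(const char *key)
{
	int rev;

	if (misses++ < 4) {
		/* early misses: scan back, cache only the matching entry */
		for (rev = ntrev - 1; rev >= 0; rev--)
			if (strcmp(keys[rev], key) == 0) {
				cached[rev] = 1;
				return rev;
			}
		return -2;
	}
	/* later misses: cache everything walked over, amortizing the scan */
	for (rev = ntrev - 1; rev >= 0; rev--) {
		cached[rev] = 1;
		if (strcmp(keys[rev], key) == 0)
			break;
	}
	ntrev = rev; /* entries at or above rev are cached now */
	return rev >= 0 ? rev : -2;
}

int main(void)
{
	const char *queries[] = {"g", "e", "c", "a", "f"};
	int i, ncached = 0;

	for (i = 0; i < 5; i++)
		printf("%s -> %d\n", queries[i], find(queries[i]));
	for (i = 0; i < N; i++)
		ncached += cached[i];
	printf("cached entries: %d of %d\n", ncached, N);
	return 0;
}
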
1953 1953
1954 1954 static PyObject *index_getitem(indexObject *self, PyObject *value)
1955 1955 {
1956 1956 char *node;
1957 1957 int rev;
1958 1958
1959 1959 if (PyInt_Check(value)) {
1960 1960 long idx;
1961 1961 if (!pylong_to_long(value, &idx)) {
1962 1962 return NULL;
1963 1963 }
1964 1964 return index_get(self, idx);
1965 1965 }
1966 1966
1967 1967 if (node_check(value, &node) == -1)
1968 1968 return NULL;
1969 1969 rev = index_find_node(self, node, 20);
1970 1970 if (rev >= -1)
1971 1971 return PyInt_FromLong(rev);
1972 1972 if (rev == -2)
1973 1973 raise_revlog_error();
1974 1974 return NULL;
1975 1975 }
1976 1976
1977 1977 /*
1978 1978 * Fully populate the radix tree.
1979 1979 */
1980 1980 static int index_populate_nt(indexObject *self)
1981 1981 {
1982 1982 int rev;
1983 1983 if (self->ntrev > 0) {
1984 1984 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1985 1985 const char *n = index_node_existing(self, rev);
1986 1986 if (n == NULL)
1987 1987 return -1;
1988 1988 if (nt_insert(&self->nt, n, rev) == -1)
1989 1989 return -1;
1990 1990 }
1991 1991 self->ntrev = -1;
1992 1992 }
1993 1993 return 0;
1994 1994 }
1995 1995
1996 1996 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
1997 1997 {
1998 1998 const char *fullnode;
1999 1999 Py_ssize_t nodelen;
2000 2000 char *node;
2001 2001 int rev, i;
2002 2002
2003 2003 if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
2004 2004 return NULL;
2005 2005
2006 2006 if (nodelen < 1) {
2007 2007 PyErr_SetString(PyExc_ValueError, "key too short");
2008 2008 return NULL;
2009 2009 }
2010 2010
2011 2011 if (nodelen > 40) {
2012 2012 PyErr_SetString(PyExc_ValueError, "key too long");
2013 2013 return NULL;
2014 2014 }
2015 2015
2016 2016 for (i = 0; i < nodelen; i++)
2017 2017 hexdigit(node, i);
2018 2018 if (PyErr_Occurred()) {
2019 2019 /* input contains non-hex characters */
2020 2020 PyErr_Clear();
2021 2021 Py_RETURN_NONE;
2022 2022 }
2023 2023
2024 2024 if (index_init_nt(self) == -1)
2025 2025 return NULL;
2026 2026 if (index_populate_nt(self) == -1)
2027 2027 return NULL;
2028 2028 rev = nt_partialmatch(&self->nt, node, nodelen);
2029 2029
2030 2030 switch (rev) {
2031 2031 case -4:
2032 2032 raise_revlog_error();
2033 2033 return NULL;
2034 2034 case -2:
2035 2035 Py_RETURN_NONE;
2036 2036 case -1:
2037 2037 return PyBytes_FromStringAndSize(nullid, 20);
2038 2038 }
2039 2039
2040 2040 fullnode = index_node_existing(self, rev);
2041 2041 if (fullnode == NULL) {
2042 2042 return NULL;
2043 2043 }
2044 2044 return PyBytes_FromStringAndSize(fullnode, 20);
2045 2045 }
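
index_partialmatch() accepts a hex prefix of 1 to 40 characters, returns None if the prefix contains a non-hex character, and maps nt_partialmatch()'s result codes onto Python values (-4 becomes a revlog error, -2 becomes None, -1 resolves to nullid, anything else to the full 20-byte node). A standalone sketch of just the prefix validation (hypothetical helper; it uses isxdigit() instead of this file's hexdigit()):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* 1 if s is a plausible node prefix: 1..40 hex characters. */
static int valid_prefix(const char *s)
{
	size_t i, len = strlen(s);
	if (len < 1 || len > 40)
		return 0;
	for (i = 0; i < len; i++)
		if (!isxdigit((unsigned char)s[i]))
			return 0;
	return 1;
}

int main(void)
{
	printf("%d %d %d\n", valid_prefix("1de4"), valid_prefix(""),
	       valid_prefix("xyz"));
	return 0;
}
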
2046 2046
2047 2047 static PyObject *index_shortest(indexObject *self, PyObject *args)
2048 2048 {
2049 2049 PyObject *val;
2050 2050 char *node;
2051 2051 int length;
2052 2052
2053 2053 if (!PyArg_ParseTuple(args, "O", &val))
2054 2054 return NULL;
2055 2055 if (node_check(val, &node) == -1)
2056 2056 return NULL;
2057 2057
2058 2058 self->ntlookups++;
2059 2059 if (index_init_nt(self) == -1)
2060 2060 return NULL;
2061 2061 if (index_populate_nt(self) == -1)
2062 2062 return NULL;
2063 2063 length = nt_shortest(&self->nt, node);
2064 2064 if (length == -3)
2065 2065 return NULL;
2066 2066 if (length == -2) {
2067 2067 raise_revlog_error();
2068 2068 return NULL;
2069 2069 }
2070 2070 return PyInt_FromLong(length);
2071 2071 }
2072 2072
2073 2073 static PyObject *index_m_get(indexObject *self, PyObject *args)
2074 2074 {
2075 2075 PyObject *val;
2076 2076 char *node;
2077 2077 int rev;
2078 2078
2079 2079 if (!PyArg_ParseTuple(args, "O", &val))
2080 2080 return NULL;
2081 2081 if (node_check(val, &node) == -1)
2082 2082 return NULL;
2083 2083 rev = index_find_node(self, node, 20);
2084 2084 if (rev == -3)
2085 2085 return NULL;
2086 2086 if (rev == -2)
2087 2087 Py_RETURN_NONE;
2088 2088 return PyInt_FromLong(rev);
2089 2089 }
2090 2090
2091 2091 static int index_contains(indexObject *self, PyObject *value)
2092 2092 {
2093 2093 char *node;
2094 2094
2095 2095 if (PyInt_Check(value)) {
2096 2096 long rev;
2097 2097 if (!pylong_to_long(value, &rev)) {
2098 2098 return -1;
2099 2099 }
2100 2100 return rev >= -1 && rev < index_length(self);
2101 2101 }
2102 2102
2103 2103 if (node_check(value, &node) == -1)
2104 2104 return -1;
2105 2105
2106 2106 switch (index_find_node(self, node, 20)) {
2107 2107 case -3:
2108 2108 return -1;
2109 2109 case -2:
2110 2110 return 0;
2111 2111 default:
2112 2112 return 1;
2113 2113 }
2114 2114 }
2115 2115
2116 2116 static PyObject *index_m_has_node(indexObject *self, PyObject *args)
2117 2117 {
2118 2118 int ret = index_contains(self, args);
2119 2119 if (ret < 0)
2120 2120 return NULL;
2121 2121 return PyBool_FromLong((long)ret);
2122 2122 }
2123 2123
2124 2124 static PyObject *index_m_rev(indexObject *self, PyObject *val)
2125 2125 {
2126 2126 char *node;
2127 2127 int rev;
2128 2128
2129 2129 if (node_check(val, &node) == -1)
2130 2130 return NULL;
2131 2131 rev = index_find_node(self, node, 20);
2132 2132 if (rev >= -1)
2133 2133 return PyInt_FromLong(rev);
2134 2134 if (rev == -2)
2135 2135 raise_revlog_error();
2136 2136 return NULL;
2137 2137 }
2138 2138
2139 2139 typedef uint64_t bitmask;
2140 2140
2141 2141 /*
2142 2142 * Given a disjoint set of revs, return all candidates for the
2143 2143 * greatest common ancestor. In revset notation, this is the set
2144 2144 * "heads(::a and ::b and ...)"
2145 2145 */
2146 2146 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
2147 2147 int revcount)
2148 2148 {
2149 2149 const bitmask allseen = (1ull << revcount) - 1;
2150 2150 const bitmask poison = 1ull << revcount;
2151 2151 PyObject *gca = PyList_New(0);
2152 2152 int i, v, interesting;
2153 2153 int maxrev = -1;
2154 2154 bitmask sp;
2155 2155 bitmask *seen;
2156 2156
2157 2157 if (gca == NULL)
2158 2158 return PyErr_NoMemory();
2159 2159
2160 2160 for (i = 0; i < revcount; i++) {
2161 2161 if (revs[i] > maxrev)
2162 2162 maxrev = revs[i];
2163 2163 }
2164 2164
2165 2165 seen = calloc(sizeof(*seen), maxrev + 1);
2166 2166 if (seen == NULL) {
2167 2167 Py_DECREF(gca);
2168 2168 return PyErr_NoMemory();
2169 2169 }
2170 2170
2171 2171 for (i = 0; i < revcount; i++)
2172 2172 seen[revs[i]] = 1ull << i;
2173 2173
2174 2174 interesting = revcount;
2175 2175
2176 2176 for (v = maxrev; v >= 0 && interesting; v--) {
2177 2177 bitmask sv = seen[v];
2178 2178 int parents[2];
2179 2179
2180 2180 if (!sv)
2181 2181 continue;
2182 2182
2183 2183 if (sv < poison) {
2184 2184 interesting -= 1;
2185 2185 if (sv == allseen) {
2186 2186 PyObject *obj = PyInt_FromLong(v);
2187 2187 if (obj == NULL)
2188 2188 goto bail;
2189 2189 if (PyList_Append(gca, obj) == -1) {
2190 2190 Py_DECREF(obj);
2191 2191 goto bail;
2192 2192 }
2193 2193 sv |= poison;
2194 2194 for (i = 0; i < revcount; i++) {
2195 2195 if (revs[i] == v)
2196 2196 goto done;
2197 2197 }
2198 2198 }
2199 2199 }
2200 2200 if (index_get_parents(self, v, parents, maxrev) < 0)
2201 2201 goto bail;
2202 2202
2203 2203 for (i = 0; i < 2; i++) {
2204 2204 int p = parents[i];
2205 2205 if (p == -1)
2206 2206 continue;
2207 2207 sp = seen[p];
2208 2208 if (sv < poison) {
2209 2209 if (sp == 0) {
2210 2210 seen[p] = sv;
2211 2211 interesting++;
2212 2212 } else if (sp != sv)
2213 2213 seen[p] |= sv;
2214 2214 } else {
2215 2215 if (sp && sp < poison)
2216 2216 interesting--;
2217 2217 seen[p] = sv;
2218 2218 }
2219 2219 }
2220 2220 }
2221 2221
2222 2222 done:
2223 2223 free(seen);
2224 2224 return gca;
2225 2225 bail:
2226 2226 free(seen);
2227 2227 Py_XDECREF(gca);
2228 2228 return NULL;
2229 2229 }
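
find_gca_candidates() walks revisions from highest to lowest, tracking for each one a bitmask of which input revs can reach it; a revision whose mask equals allseen is a candidate and is then poisoned so that its own ancestors cannot also become candidates. The standalone demo below reproduces that bookkeeping over a hard-coded toy DAG (hypothetical data; it omits the early-exit and error handling of the real function).

#include <stdint.h>
#include <stdio.h>

#define NREVS 6

/* parents[rev][0..1], -1 meaning "no parent" (toy DAG, not revlog data) */
static const int parents[NREVS][2] = {
    {-1, -1}, {0, -1}, {0, -1}, {1, 2}, {1, -1}, {2, -1},
};

/* Same seen/poison bookkeeping as find_gca_candidates(), minus Python. */
static void gca_candidates(const int *revs, int revcount)
{
	uint64_t allseen = (1ull << revcount) - 1;
	uint64_t poison = 1ull << revcount;
	uint64_t seen[NREVS] = {0};
	int i, v, maxrev = -1, interesting = revcount;

	for (i = 0; i < revcount; i++) {
		if (revs[i] > maxrev)
			maxrev = revs[i];
		seen[revs[i]] = 1ull << i;
	}

	for (v = maxrev; v >= 0 && interesting; v--) {
		uint64_t sv = seen[v];
		if (!sv)
			continue;
		if (sv < poison) {
			interesting--;
			if (sv == allseen) {
				printf("candidate: %d\n", v);
				sv |= poison;
			}
		}
		for (i = 0; i < 2; i++) {
			int p = parents[v][i];
			uint64_t sp;
			if (p == -1)
				continue;
			sp = seen[p];
			if (sv < poison) {
				if (sp == 0) {
					seen[p] = sv;
					interesting++;
				} else if (sp != sv)
					seen[p] |= sv;
			} else {
				if (sp && sp < poison)
					interesting--;
				seen[p] = sv;
			}
		}
	}
}

int main(void)
{
	int q1[] = {4, 5}, q2[] = {3, 4};
	gca_candidates(q1, 2); /* expect: candidate 0 */
	gca_candidates(q2, 2); /* expect: candidate 1 */
	return 0;
}
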
2230 2230
2231 2231 /*
2232 2232 * Given a disjoint set of revs, return the subset with the longest
2233 2233 * path to the root.
2234 2234 */
2235 2235 static PyObject *find_deepest(indexObject *self, PyObject *revs)
2236 2236 {
2237 2237 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
2238 2238 static const Py_ssize_t capacity = 24;
2239 2239 int *depth, *interesting = NULL;
2240 2240 int i, j, v, ninteresting;
2241 2241 PyObject *dict = NULL, *keys = NULL;
2242 2242 long *seen = NULL;
2243 2243 int maxrev = -1;
2244 2244 long final;
2245 2245
2246 2246 if (revcount > capacity) {
2247 2247 PyErr_Format(PyExc_OverflowError,
2248 2248 "bitset size (%ld) > capacity (%ld)",
2249 2249 (long)revcount, (long)capacity);
2250 2250 return NULL;
2251 2251 }
2252 2252
2253 2253 for (i = 0; i < revcount; i++) {
2254 2254 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2255 2255 if (n > maxrev)
2256 2256 maxrev = n;
2257 2257 }
2258 2258
2259 2259 depth = calloc(sizeof(*depth), maxrev + 1);
2260 2260 if (depth == NULL)
2261 2261 return PyErr_NoMemory();
2262 2262
2263 2263 seen = calloc(sizeof(*seen), maxrev + 1);
2264 2264 if (seen == NULL) {
2265 2265 PyErr_NoMemory();
2266 2266 goto bail;
2267 2267 }
2268 2268
2269 2269 interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
2270 2270 if (interesting == NULL) {
2271 2271 PyErr_NoMemory();
2272 2272 goto bail;
2273 2273 }
2274 2274
2275 2275 if (PyList_Sort(revs) == -1)
2276 2276 goto bail;
2277 2277
2278 2278 for (i = 0; i < revcount; i++) {
2279 2279 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2280 2280 long b = 1l << i;
2281 2281 depth[n] = 1;
2282 2282 seen[n] = b;
2283 2283 interesting[b] = 1;
2284 2284 }
2285 2285
2286 2286 /* invariant: ninteresting is the number of non-zero entries in
2287 2287 * interesting. */
2288 2288 ninteresting = (int)revcount;
2289 2289
2290 2290 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
2291 2291 int dv = depth[v];
2292 2292 int parents[2];
2293 2293 long sv;
2294 2294
2295 2295 if (dv == 0)
2296 2296 continue;
2297 2297
2298 2298 sv = seen[v];
2299 2299 if (index_get_parents(self, v, parents, maxrev) < 0)
2300 2300 goto bail;
2301 2301
2302 2302 for (i = 0; i < 2; i++) {
2303 2303 int p = parents[i];
2304 2304 long sp;
2305 2305 int dp;
2306 2306
2307 2307 if (p == -1)
2308 2308 continue;
2309 2309
2310 2310 dp = depth[p];
2311 2311 sp = seen[p];
2312 2312 if (dp <= dv) {
2313 2313 depth[p] = dv + 1;
2314 2314 if (sp != sv) {
2315 2315 interesting[sv] += 1;
2316 2316 seen[p] = sv;
2317 2317 if (sp) {
2318 2318 interesting[sp] -= 1;
2319 2319 if (interesting[sp] == 0)
2320 2320 ninteresting -= 1;
2321 2321 }
2322 2322 }
2323 2323 } else if (dv == dp - 1) {
2324 2324 long nsp = sp | sv;
2325 2325 if (nsp == sp)
2326 2326 continue;
2327 2327 seen[p] = nsp;
2328 2328 interesting[sp] -= 1;
2329 2329 if (interesting[sp] == 0)
2330 2330 ninteresting -= 1;
2331 2331 if (interesting[nsp] == 0)
2332 2332 ninteresting += 1;
2333 2333 interesting[nsp] += 1;
2334 2334 }
2335 2335 }
2336 2336 interesting[sv] -= 1;
2337 2337 if (interesting[sv] == 0)
2338 2338 ninteresting -= 1;
2339 2339 }
2340 2340
2341 2341 final = 0;
2342 2342 j = ninteresting;
2343 2343 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
2344 2344 if (interesting[i] == 0)
2345 2345 continue;
2346 2346 final |= i;
2347 2347 j -= 1;
2348 2348 }
2349 2349 if (final == 0) {
2350 2350 keys = PyList_New(0);
2351 2351 goto bail;
2352 2352 }
2353 2353
2354 2354 dict = PyDict_New();
2355 2355 if (dict == NULL)
2356 2356 goto bail;
2357 2357
2358 2358 for (i = 0; i < revcount; i++) {
2359 2359 PyObject *key;
2360 2360
2361 2361 if ((final & (1 << i)) == 0)
2362 2362 continue;
2363 2363
2364 2364 key = PyList_GET_ITEM(revs, i);
2365 2365 Py_INCREF(key);
2366 2366 Py_INCREF(Py_None);
2367 2367 if (PyDict_SetItem(dict, key, Py_None) == -1) {
2368 2368 Py_DECREF(key);
2369 2369 Py_DECREF(Py_None);
2370 2370 goto bail;
2371 2371 }
2372 2372 }
2373 2373
2374 2374 keys = PyDict_Keys(dict);
2375 2375
2376 2376 bail:
2377 2377 free(depth);
2378 2378 free(seen);
2379 2379 free(interesting);
2380 2380 Py_XDECREF(dict);
2381 2381
2382 2382 return keys;
2383 2383 }
2384 2384
2385 2385 /*
2386 2386 * Given a (possibly overlapping) set of revs, return all the
2387 2387  * common ancestors' heads: heads(::args[0] and ::args[1] and ...)
2388 2388 */
2389 2389 static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
2390 2390 {
2391 2391 PyObject *ret = NULL;
2392 2392 Py_ssize_t argcount, i, len;
2393 2393 bitmask repeat = 0;
2394 2394 int revcount = 0;
2395 2395 int *revs;
2396 2396
2397 2397 argcount = PySequence_Length(args);
2398 2398 revs = PyMem_Malloc(argcount * sizeof(*revs));
2399 2399 if (argcount > 0 && revs == NULL)
2400 2400 return PyErr_NoMemory();
2401 2401 len = index_length(self);
2402 2402
2403 2403 for (i = 0; i < argcount; i++) {
2404 2404 static const int capacity = 24;
2405 2405 PyObject *obj = PySequence_GetItem(args, i);
2406 2406 bitmask x;
2407 2407 long val;
2408 2408
2409 2409 if (!PyInt_Check(obj)) {
2410 2410 PyErr_SetString(PyExc_TypeError,
2411 2411 "arguments must all be ints");
2412 2412 Py_DECREF(obj);
2413 2413 goto bail;
2414 2414 }
2415 2415 val = PyInt_AsLong(obj);
2416 2416 Py_DECREF(obj);
2417 2417 if (val == -1) {
2418 2418 ret = PyList_New(0);
2419 2419 goto done;
2420 2420 }
2421 2421 if (val < 0 || val >= len) {
2422 2422 PyErr_SetString(PyExc_IndexError, "index out of range");
2423 2423 goto bail;
2424 2424 }
2425 2425 /* this cheesy bloom filter lets us avoid some more
2426 2426 * expensive duplicate checks in the common set-is-disjoint
2427 2427 * case */
2428 2428 x = 1ull << (val & 0x3f);
2429 2429 if (repeat & x) {
2430 2430 int k;
2431 2431 for (k = 0; k < revcount; k++) {
2432 2432 if (val == revs[k])
2433 2433 goto duplicate;
2434 2434 }
2435 2435 } else
2436 2436 repeat |= x;
2437 2437 if (revcount >= capacity) {
2438 2438 PyErr_Format(PyExc_OverflowError,
2439 2439 "bitset size (%d) > capacity (%d)",
2440 2440 revcount, capacity);
2441 2441 goto bail;
2442 2442 }
2443 2443 revs[revcount++] = (int)val;
2444 2444 duplicate:;
2445 2445 }
2446 2446
2447 2447 if (revcount == 0) {
2448 2448 ret = PyList_New(0);
2449 2449 goto done;
2450 2450 }
2451 2451 if (revcount == 1) {
2452 2452 PyObject *obj;
2453 2453 ret = PyList_New(1);
2454 2454 if (ret == NULL)
2455 2455 goto bail;
2456 2456 obj = PyInt_FromLong(revs[0]);
2457 2457 if (obj == NULL)
2458 2458 goto bail;
2459 2459 PyList_SET_ITEM(ret, 0, obj);
2460 2460 goto done;
2461 2461 }
2462 2462
2463 2463 ret = find_gca_candidates(self, revs, revcount);
2464 2464 if (ret == NULL)
2465 2465 goto bail;
2466 2466
2467 2467 done:
2468 2468 PyMem_Free(revs);
2469 2469 return ret;
2470 2470
2471 2471 bail:
2472 2472 PyMem_Free(revs);
2473 2473 Py_XDECREF(ret);
2474 2474 return NULL;
2475 2475 }
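
The "cheesy bloom filter" above hashes each rev onto one bit of a 64-bit mask (val & 0x3f); the linear duplicate scan only runs when that bit was already set, so disjoint inputs, the common case, never pay for it. A standalone illustration with toy values (hypothetical dedup() helper; unlike the real code it silently drops values past capacity instead of raising):

#include <stdint.h>
#include <stdio.h>

/* Copy the unique values of vals[] into revs[]; returns how many were kept. */
static int dedup(const long *vals, int nvals, int *revs, int capacity)
{
	uint64_t repeat = 0;
	int revcount = 0, i, k;

	for (i = 0; i < nvals; i++) {
		long val = vals[i];
		uint64_t x = 1ull << (val & 0x3f);
		if (repeat & x) {
			/* possible duplicate: confirm with a linear scan */
			for (k = 0; k < revcount; k++)
				if (val == revs[k])
					goto duplicate;
		} else
			repeat |= x;
		if (revcount < capacity)
			revs[revcount++] = (int)val;
	duplicate:;
	}
	return revcount;
}

int main(void)
{
	long vals[] = {5, 7, 69, 7, 5}; /* 69 & 0x3f == 5: a false "maybe" */
	int revs[8];
	printf("kept %d values\n", dedup(vals, 5, revs, 8)); /* expect 3 */
	return 0;
}
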
2476 2476
2477 2477 /*
2478 2478 * Given a (possibly overlapping) set of revs, return the greatest
2479 2479 * common ancestors: those with the longest path to the root.
2480 2480 */
2481 2481 static PyObject *index_ancestors(indexObject *self, PyObject *args)
2482 2482 {
2483 2483 PyObject *ret;
2484 2484 PyObject *gca = index_commonancestorsheads(self, args);
2485 2485 if (gca == NULL)
2486 2486 return NULL;
2487 2487
2488 2488 if (PyList_GET_SIZE(gca) <= 1) {
2489 2489 return gca;
2490 2490 }
2491 2491
2492 2492 ret = find_deepest(self, gca);
2493 2493 Py_DECREF(gca);
2494 2494 return ret;
2495 2495 }
2496 2496
2497 2497 /*
2498 2498 * Invalidate any trie entries introduced by added revs.
2499 2499 */
2500 2500 static void index_invalidate_added(indexObject *self, Py_ssize_t start)
2501 2501 {
2502 2502 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
2503 2503
2504 2504 for (i = start; i < len; i++) {
2505 2505 PyObject *tuple = PyList_GET_ITEM(self->added, i);
2506 2506 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
2507 2507
2508 2508 nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
2509 2509 }
2510 2510
2511 2511 if (start == 0)
2512 2512 Py_CLEAR(self->added);
2513 2513 }
2514 2514
2515 2515 /*
2516 2516 * Delete a numeric range of revs, which must be at the end of the
2517 2517 * range.
2518 2518 */
2519 2519 static int index_slice_del(indexObject *self, PyObject *item)
2520 2520 {
2521 2521 Py_ssize_t start, stop, step, slicelength;
2522 2522 Py_ssize_t length = index_length(self) + 1;
2523 2523 int ret = 0;
2524 2524
2525 2525 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
2526 2526 #ifdef IS_PY3K
2527 2527 if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
2528 2528 &slicelength) < 0)
2529 2529 #else
2530 2530 if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
2531 2531 &step, &slicelength) < 0)
2532 2532 #endif
2533 2533 return -1;
2534 2534
2535 2535 if (slicelength <= 0)
2536 2536 return 0;
2537 2537
2538 2538 if ((step < 0 && start < stop) || (step > 0 && start > stop))
2539 2539 stop = start;
2540 2540
2541 2541 if (step < 0) {
2542 2542 stop = start + 1;
2543 2543 start = stop + step * (slicelength - 1) - 1;
2544 2544 step = -step;
2545 2545 }
2546 2546
2547 2547 if (step != 1) {
2548 2548 PyErr_SetString(PyExc_ValueError,
2549 2549 "revlog index delete requires step size of 1");
2550 2550 return -1;
2551 2551 }
2552 2552
2553 2553 if (stop != length - 1) {
2554 2554 PyErr_SetString(PyExc_IndexError,
2555 2555 "revlog index deletion indices are invalid");
2556 2556 return -1;
2557 2557 }
2558 2558
2559 2559 if (start < self->length) {
2560 2560 if (self->ntinitialized) {
2561 2561 Py_ssize_t i;
2562 2562
2563 2563 for (i = start; i < self->length; i++) {
2564 2564 const char *node = index_node_existing(self, i);
2565 2565 if (node == NULL)
2566 2566 return -1;
2567 2567
2568 2568 nt_delete_node(&self->nt, node);
2569 2569 }
2570 2570 if (self->added)
2571 2571 index_invalidate_added(self, 0);
2572 2572 if (self->ntrev > start)
2573 2573 self->ntrev = (int)start;
2574 2574 } else if (self->added) {
2575 2575 Py_CLEAR(self->added);
2576 2576 }
2577 2577
2578 2578 self->length = start;
2579 2579 if (start < self->raw_length) {
2580 2580 if (self->cache) {
2581 2581 Py_ssize_t i;
2582 2582 for (i = start; i < self->raw_length; i++)
2583 2583 Py_CLEAR(self->cache[i]);
2584 2584 }
2585 2585 self->raw_length = start;
2586 2586 }
2587 2587 goto done;
2588 2588 }
2589 2589
2590 2590 if (self->ntinitialized) {
2591 2591 index_invalidate_added(self, start - self->length);
2592 2592 if (self->ntrev > start)
2593 2593 self->ntrev = (int)start;
2594 2594 }
2595 2595 if (self->added)
2596 2596 ret = PyList_SetSlice(self->added, start - self->length,
2597 2597 PyList_GET_SIZE(self->added), NULL);
2598 2598 done:
2599 2599 Py_CLEAR(self->headrevs);
2600 2600 return ret;
2601 2601 }
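
index_slice_del() first normalizes a negative-step slice into the equivalent ascending one (stop = start + 1, then start = stop + step * (slicelength - 1) - 1, then step = -step) and only then insists on a step of 1 and on the slice reaching the end of the index. A standalone check of that arithmetic with toy numbers:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* index[9:4:-1] visits 9, 8, 7, 6, 5 -> slicelength == 5 */
	long start = 9, stop = 4, step = -1, slicelength = 5;

	if (step < 0) {
		stop = start + 1;                            /* 10 */
		start = stop + step * (slicelength - 1) - 1; /* 5 */
		step = -step;                                /* 1 */
	}
	assert(start == 5 && stop == 10 && step == 1);
	printf("normalized: [%ld:%ld:%ld]\n", start, stop, step);
	return 0;
}
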
2602 2602
2603 2603 /*
2604 2604 * Supported ops:
2605 2605 *
2606 2606 * slice deletion
2607 2607 * string assignment (extend node->rev mapping)
2608 2608 * string deletion (shrink node->rev mapping)
2609 2609 */
2610 2610 static int index_assign_subscript(indexObject *self, PyObject *item,
2611 2611 PyObject *value)
2612 2612 {
2613 2613 char *node;
2614 2614 long rev;
2615 2615
2616 2616 if (PySlice_Check(item) && value == NULL)
2617 2617 return index_slice_del(self, item);
2618 2618
2619 2619 if (node_check(item, &node) == -1)
2620 2620 return -1;
2621 2621
2622 2622 if (value == NULL)
2623 2623 return self->ntinitialized ? nt_delete_node(&self->nt, node)
2624 2624 : 0;
2625 2625 rev = PyInt_AsLong(value);
2626 2626 if (rev > INT_MAX || rev < 0) {
2627 2627 if (!PyErr_Occurred())
2628 2628 PyErr_SetString(PyExc_ValueError, "rev out of range");
2629 2629 return -1;
2630 2630 }
2631 2631
2632 2632 if (index_init_nt(self) == -1)
2633 2633 return -1;
2634 2634 return nt_insert(&self->nt, node, (int)rev);
2635 2635 }
2636 2636
2637 2637 /*
2638 2638 * Find all RevlogNG entries in an index that has inline data. Update
2639 2639 * the optional "offsets" table with those entries.
2640 2640 */
2641 2641 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
2642 2642 {
2643 2643 const char *data = (const char *)self->buf.buf;
2644 2644 Py_ssize_t pos = 0;
2645 2645 Py_ssize_t end = self->buf.len;
2646 2646 long incr = v1_hdrsize;
2647 2647 Py_ssize_t len = 0;
2648 2648
2649 2649 while (pos + v1_hdrsize <= end && pos >= 0) {
2650 2650 uint32_t comp_len;
2651 2651 /* 3rd element of header is length of compressed inline data */
2652 2652 comp_len = getbe32(data + pos + 8);
2653 2653 incr = v1_hdrsize + comp_len;
2654 2654 if (offsets)
2655 2655 offsets[len] = data + pos;
2656 2656 len++;
2657 2657 pos += incr;
2658 2658 }
2659 2659
2660 2660 if (pos != end) {
2661 2661 if (!PyErr_Occurred())
2662 2662 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2663 2663 return -1;
2664 2664 }
2665 2665
2666 2666 return len;
2667 2667 }
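
In an inlined index, each fixed-size header is immediately followed by that revision's compressed data, whose length is stored big-endian at byte offset 8 of the header, so inline_scan() advances by v1_hdrsize plus that length on every step. The standalone sketch below walks a fabricated buffer the same way; the 64-byte header size is an assumption made for the sketch, matching revlog v1.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDRSIZE 64 /* assumed v1 header size */

static uint32_t getbe32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/* Count records; returns -1 if the buffer does not end on a boundary. */
static long count_records(const unsigned char *data, long len)
{
	long pos = 0, n = 0;
	while (pos + HDRSIZE <= len) {
		uint32_t comp_len = getbe32(data + pos + 8);
		pos += HDRSIZE + comp_len;
		n++;
	}
	return pos == len ? n : -1;
}

int main(void)
{
	/* two fake records: 5 and 0 bytes of inline data */
	unsigned char buf[HDRSIZE + 5 + HDRSIZE];
	memset(buf, 0, sizeof(buf));
	buf[8 + 3] = 5; /* big-endian comp_len of the first record */
	printf("records: %ld\n", count_records(buf, (long)sizeof(buf)));
	return 0;
}
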
2668 2668
2669 2669 static int index_init(indexObject *self, PyObject *args)
2670 2670 {
2671 2671 PyObject *data_obj, *inlined_obj;
2672 2672 Py_ssize_t size;
2673 2673
2674 2674 /* Initialize before argument-checking to avoid index_dealloc() crash.
2675 2675 */
2676 2676 self->raw_length = 0;
2677 2677 self->added = NULL;
2678 2678 self->cache = NULL;
2679 2679 self->data = NULL;
2680 2680 memset(&self->buf, 0, sizeof(self->buf));
2681 2681 self->headrevs = NULL;
2682 2682 self->filteredrevs = Py_None;
2683 2683 Py_INCREF(Py_None);
2684 2684 self->ntinitialized = 0;
2685 2685 self->offsets = NULL;
2686 2686
2687 2687 if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
2688 2688 return -1;
2689 2689 if (!PyObject_CheckBuffer(data_obj)) {
2690 2690 PyErr_SetString(PyExc_TypeError,
2691 2691 "data does not support buffer interface");
2692 2692 return -1;
2693 2693 }
2694 2694
2695 2695 if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
2696 2696 return -1;
2697 2697 size = self->buf.len;
2698 2698
2699 2699 self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
2700 2700 self->data = data_obj;
2701 2701
2702 2702 self->ntlookups = self->ntmisses = 0;
2703 2703 self->ntrev = -1;
2704 2704 Py_INCREF(self->data);
2705 2705
2706 2706 if (self->inlined) {
2707 2707 Py_ssize_t len = inline_scan(self, NULL);
2708 2708 if (len == -1)
2709 2709 goto bail;
2710 2710 self->raw_length = len;
2711 2711 self->length = len;
2712 2712 } else {
2713 2713 if (size % v1_hdrsize) {
2714 2714 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2715 2715 goto bail;
2716 2716 }
2717 2717 self->raw_length = size / v1_hdrsize;
2718 2718 self->length = self->raw_length;
2719 2719 }
2720 2720
2721 2721 return 0;
2722 2722 bail:
2723 2723 return -1;
2724 2724 }
2725 2725
2726 2726 static PyObject *index_nodemap(indexObject *self)
2727 2727 {
2728 2728 Py_INCREF(self);
2729 2729 return (PyObject *)self;
2730 2730 }
2731 2731
2732 2732 static void _index_clearcaches(indexObject *self)
2733 2733 {
2734 2734 if (self->cache) {
2735 2735 Py_ssize_t i;
2736 2736
2737 2737 for (i = 0; i < self->raw_length; i++)
2738 2738 Py_CLEAR(self->cache[i]);
2739 2739 free(self->cache);
2740 2740 self->cache = NULL;
2741 2741 }
2742 2742 if (self->offsets) {
2743 2743 PyMem_Free((void *)self->offsets);
2744 2744 self->offsets = NULL;
2745 2745 }
2746 2746 if (self->ntinitialized) {
2747 2747 nt_dealloc(&self->nt);
2748 2748 }
2749 2749 self->ntinitialized = 0;
2750 2750 Py_CLEAR(self->headrevs);
2751 2751 }
2752 2752
2753 2753 static PyObject *index_clearcaches(indexObject *self)
2754 2754 {
2755 2755 _index_clearcaches(self);
2756 2756 self->ntrev = -1;
2757 2757 self->ntlookups = self->ntmisses = 0;
2758 2758 Py_RETURN_NONE;
2759 2759 }
2760 2760
2761 2761 static void index_dealloc(indexObject *self)
2762 2762 {
2763 2763 _index_clearcaches(self);
2764 2764 Py_XDECREF(self->filteredrevs);
2765 2765 if (self->buf.buf) {
2766 2766 PyBuffer_Release(&self->buf);
2767 2767 memset(&self->buf, 0, sizeof(self->buf));
2768 2768 }
2769 2769 Py_XDECREF(self->data);
2770 2770 Py_XDECREF(self->added);
2771 2771 PyObject_Del(self);
2772 2772 }
2773 2773
2774 2774 static PySequenceMethods index_sequence_methods = {
2775 2775 (lenfunc)index_length, /* sq_length */
2776 2776 0, /* sq_concat */
2777 2777 0, /* sq_repeat */
2778 2778 (ssizeargfunc)index_get, /* sq_item */
2779 2779 0, /* sq_slice */
2780 2780 0, /* sq_ass_item */
2781 2781 0, /* sq_ass_slice */
2782 2782 (objobjproc)index_contains, /* sq_contains */
2783 2783 };
2784 2784
2785 2785 static PyMappingMethods index_mapping_methods = {
2786 2786 (lenfunc)index_length, /* mp_length */
2787 2787 (binaryfunc)index_getitem, /* mp_subscript */
2788 2788 (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
2789 2789 };
2790 2790
2791 2791 static PyMethodDef index_methods[] = {
2792 2792 {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
2793 2793 "return the gca set of the given revs"},
2794 2794 {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
2795 2795 METH_VARARGS,
2796 2796 "return the heads of the common ancestors of the given revs"},
2797 2797 {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
2798 2798 "clear the index caches"},
2799 2799 {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
2800 2800 {"get_rev", (PyCFunction)index_m_get, METH_VARARGS,
2801 2801 "return `rev` associated with a node or None"},
2802 2802 {"has_node", (PyCFunction)index_m_has_node, METH_O,
2803 2803 	 "return True if the node exists in the index"},
2804 2804 {"rev", (PyCFunction)index_m_rev, METH_O,
2805 2805 "return `rev` associated with a node or raise RevlogError"},
2806 2806 {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
2807 2807 "compute phases"},
2808 2808 {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
2809 2809 "reachableroots"},
2810 2810 {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
2811 2811 "get head revisions"}, /* Can do filtering since 3.2 */
2812 2812 {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
2813 2813 "get filtered head revisions"}, /* Can always do filtering */
2814 2814 {"issnapshot", (PyCFunction)index_issnapshot, METH_O,
2815 2815 "True if the object is a snapshot"},
2816 2816 {"findsnapshots", (PyCFunction)index_findsnapshots, METH_VARARGS,
2817 2817 "Gather snapshot data in a cache dict"},
2818 2818 {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
2819 2819 "determine revisions with deltas to reconstruct fulltext"},
2820 2820 {"slicechunktodensity", (PyCFunction)index_slicechunktodensity,
2821 2821 	 METH_VARARGS, "slice revisions into chunks by estimated read density"},
2822 2822 {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
2823 2823 {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
2824 2824 "match a potentially ambiguous node ID"},
2825 2825 {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
2826 2826 "find length of shortest hex nodeid of a binary ID"},
2827 2827 {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
2828 2828 {NULL} /* Sentinel */
2829 2829 };
2830 2830
2831 2831 static PyGetSetDef index_getset[] = {
2832 2832 {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
2833 2833 {NULL} /* Sentinel */
2834 2834 };
2835 2835
2836 2836 PyTypeObject HgRevlogIndex_Type = {
2837 2837 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2838 2838 "parsers.index", /* tp_name */
2839 2839 sizeof(indexObject), /* tp_basicsize */
2840 2840 0, /* tp_itemsize */
2841 2841 (destructor)index_dealloc, /* tp_dealloc */
2842 2842 0, /* tp_print */
2843 2843 0, /* tp_getattr */
2844 2844 0, /* tp_setattr */
2845 2845 0, /* tp_compare */
2846 2846 0, /* tp_repr */
2847 2847 0, /* tp_as_number */
2848 2848 &index_sequence_methods, /* tp_as_sequence */
2849 2849 &index_mapping_methods, /* tp_as_mapping */
2850 2850 0, /* tp_hash */
2851 2851 0, /* tp_call */
2852 2852 0, /* tp_str */
2853 2853 0, /* tp_getattro */
2854 2854 0, /* tp_setattro */
2855 2855 0, /* tp_as_buffer */
2856 2856 Py_TPFLAGS_DEFAULT, /* tp_flags */
2857 2857 "revlog index", /* tp_doc */
2858 2858 0, /* tp_traverse */
2859 2859 0, /* tp_clear */
2860 2860 0, /* tp_richcompare */
2861 2861 0, /* tp_weaklistoffset */
2862 2862 0, /* tp_iter */
2863 2863 0, /* tp_iternext */
2864 2864 index_methods, /* tp_methods */
2865 2865 0, /* tp_members */
2866 2866 index_getset, /* tp_getset */
2867 2867 0, /* tp_base */
2868 2868 0, /* tp_dict */
2869 2869 0, /* tp_descr_get */
2870 2870 0, /* tp_descr_set */
2871 2871 0, /* tp_dictoffset */
2872 2872 (initproc)index_init, /* tp_init */
2873 2873 0, /* tp_alloc */
2874 2874 };
2875 2875
2876 2876 /*
2877 2877  * returns a tuple of the form (index, cache) with elements as
2878 2878 * follows:
2879 2879 *
2880 2880 * index: an index object that lazily parses RevlogNG records
2881 2881 * cache: if data is inlined, a tuple (0, index_file_content), else None
2882 2882 * index_file_content could be a string, or a buffer
2883 2883 *
2884 2884 * added complications are for backwards compatibility
2885 2885 */
2886 2886 PyObject *parse_index2(PyObject *self, PyObject *args)
2887 2887 {
2888 PyObject *tuple = NULL, *cache = NULL;
2888 PyObject *cache = NULL;
2889 2889 indexObject *idx;
2890 2890 int ret;
2891 2891
2892 2892 idx = PyObject_New(indexObject, &HgRevlogIndex_Type);
2893 2893 if (idx == NULL)
2894 2894 goto bail;
2895 2895
2896 2896 ret = index_init(idx, args);
2897 2897 if (ret == -1)
2898 2898 goto bail;
2899 2899
2900 2900 if (idx->inlined) {
2901 2901 cache = Py_BuildValue("iO", 0, idx->data);
2902 2902 if (cache == NULL)
2903 2903 goto bail;
2904 2904 } else {
2905 2905 cache = Py_None;
2906 2906 Py_INCREF(cache);
2907 2907 }
2908 2908
2909 tuple = Py_BuildValue("NN", idx, cache);
2910 if (!tuple)
2911 goto bail;
2912 return tuple;
2909 return Py_BuildValue("NN", idx, cache);
2913 2910
2914 2911 bail:
2915 2912 Py_XDECREF(idx);
2916 2913 Py_XDECREF(cache);
2917 Py_XDECREF(tuple);
2918 2914 return NULL;
2919 2915 }
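
The hunk above is the point of this commit: with the "N" format, Py_BuildValue() steals the caller's references to idx and cache, so after that call the caller no longer owns them and must not decref them again, which is exactly what the old failure path did via bail:. Returning the Py_BuildValue() result directly removes that excessive decref. A minimal sketch of the ownership difference between "N" and "O" (hypothetical pair_steal()/pair_borrow() helpers, each taking one owned reference to a and b):

#include <Python.h>

/* "N" transfers ownership of a and b to Py_BuildValue, so even when it
 * returns NULL this function must not decref them again; doing so would
 * be the excessive decref this commit removes. */
static PyObject *pair_steal(PyObject *a, PyObject *b)
{
	return Py_BuildValue("NN", a, b);
}

/* "O" makes Py_BuildValue take its own references, so this function still
 * has to drop the ones it was handed, on success and on failure alike. */
static PyObject *pair_borrow(PyObject *a, PyObject *b)
{
	PyObject *t = Py_BuildValue("OO", a, b);
	Py_DECREF(a);
	Py_DECREF(b);
	return t; /* may be NULL with an exception set */
}
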
2920 2916
2921 2917 static Revlog_CAPI CAPI = {
2922 2918 /* increment the abi_version field upon each change in the Revlog_CAPI
2923 2919 struct or in the ABI of the listed functions */
2924 2920 2,
2925 2921 index_length,
2926 2922 index_node,
2927 2923 HgRevlogIndex_GetParents,
2928 2924 };
2929 2925
2930 2926 void revlog_module_init(PyObject *mod)
2931 2927 {
2932 2928 PyObject *caps = NULL;
2933 2929 HgRevlogIndex_Type.tp_new = PyType_GenericNew;
2934 2930 if (PyType_Ready(&HgRevlogIndex_Type) < 0)
2935 2931 return;
2936 2932 Py_INCREF(&HgRevlogIndex_Type);
2937 2933 PyModule_AddObject(mod, "index", (PyObject *)&HgRevlogIndex_Type);
2938 2934
2939 2935 nodetreeType.tp_new = PyType_GenericNew;
2940 2936 if (PyType_Ready(&nodetreeType) < 0)
2941 2937 return;
2942 2938 Py_INCREF(&nodetreeType);
2943 2939 PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);
2944 2940
2945 2941 if (!nullentry) {
2946 2942 nullentry =
2947 2943 Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0, -1,
2948 2944 -1, -1, -1, nullid, (Py_ssize_t)20);
2949 2945 }
2950 2946 if (nullentry)
2951 2947 PyObject_GC_UnTrack(nullentry);
2952 2948
2953 2949 caps = PyCapsule_New(&CAPI, "mercurial.cext.parsers.revlog_CAPI", NULL);
2954 2950 if (caps != NULL)
2955 2951 PyModule_AddObject(mod, "revlog_CAPI", caps);
2956 2952 }