##// END OF EJS Templates
revlog: don't cache parsed tuples in the C module...
Joerg Sonnenberger -
r46413:4404f129 default
parent child Browse files
Show More
@@ -1,2954 +1,2917 b''
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #define PY_SSIZE_T_CLEAN
11 11 #include <Python.h>
12 12 #include <assert.h>
13 13 #include <ctype.h>
14 14 #include <limits.h>
15 15 #include <stddef.h>
16 16 #include <stdlib.h>
17 17 #include <string.h>
18 18
19 19 #include "bitmanipulation.h"
20 20 #include "charencode.h"
21 21 #include "revlog.h"
22 22 #include "util.h"
23 23
24 24 #ifdef IS_PY3K
25 25 /* The mapping of Python types is meant to be temporary to get Python
26 26 * 3 to compile. We should remove this once Python 3 support is fully
27 27 * supported and proper types are used in the extensions themselves. */
28 28 #define PyInt_Check PyLong_Check
29 29 #define PyInt_FromLong PyLong_FromLong
30 30 #define PyInt_FromSsize_t PyLong_FromSsize_t
31 31 #define PyInt_AsLong PyLong_AsLong
32 32 #endif
33 33
/* Core data types of the revlog C index (quoted from a changeset diff;
 * left/right columns are the pre/post-change line numbers). */
34 34 typedef struct indexObjectStruct indexObject;
35 35
/* One interior trie node: 16 child slots, one per hex nibble. */
36 36 typedef struct {
37 37 int children[16];
38 38 } nodetreenode;
39 39
/* C-level capsule API exported to other extension modules; abi_version
 * lets consumers detect incompatible layout changes. */
40 40 typedef struct {
41 41 int abi_version;
42 42 Py_ssize_t (*index_length)(const indexObject *);
43 43 const char *(*index_node)(indexObject *, Py_ssize_t);
44 44 int (*index_parents)(PyObject *, int, int *);
45 45 } Revlog_CAPI;
46 46
47 47 /*
48 48 * A base-16 trie for fast node->rev mapping.
49 49 *
50 50 * Positive value is index of the next node in the trie
51 51 * Negative value is a leaf: -(rev + 2)
52 52 * Zero is empty
53 53 */
54 54 typedef struct {
55 55 indexObject *index;
56 56 nodetreenode *nodes;
57 57 unsigned length; /* # nodes in use */
58 58 unsigned capacity; /* # nodes allocated */
59 59 int depth; /* maximum depth of tree */
60 60 int splits; /* # splits performed */
61 61 } nodetree;
62 62
/* Python-visible wrapper object holding one standalone nodetree. */
63 63 typedef struct {
64 64 PyObject_HEAD /* ; */
65 65 nodetree nt;
66 66 } nodetreeObject;
67 67
68 68 /*
69 69 * This class has two behaviors.
70 70 *
71 71 * When used in a list-like way (with integer keys), we decode an
72 72 * entry in a RevlogNG index file on demand. We have limited support for
73 73 * integer-keyed insert and delete, only at elements right before the
74 74 * end.
75 75 *
76 76 * With string keys, we lazily perform a reverse mapping from node to
77 77 * rev, using a base-16 trie.
78 78 */
79 79 struct indexObjectStruct {
80 80 PyObject_HEAD
81 81 /* Type-specific fields go here. */
82 82 PyObject *data; /* raw bytes of index */
83 83 Py_buffer buf; /* buffer of data */
/* NOTE(review): the "cache" member below is old-column-only — this
 * changeset deletes the per-rev parsed-tuple cache entirely. */
84 PyObject **cache; /* cached tuples */
85 84 const char **offsets; /* populated on demand */
86 85 Py_ssize_t raw_length; /* original number of elements */
87 86 Py_ssize_t length; /* current number of elements */
88 87 PyObject *added; /* populated on demand */
89 88 PyObject *headrevs; /* cache, invalidated on changes */
90 89 PyObject *filteredrevs; /* filtered revs set */
91 90 nodetree nt; /* base-16 trie */
92 91 int ntinitialized; /* 0 or 1 */
93 92 int ntrev; /* last rev scanned */
94 93 int ntlookups; /* # lookups */
95 94 int ntmisses; /* # lookups that miss the cache */
96 95 int inlined;
97 96 };
98 97
/* Total revision count: on-disk entries plus any entries appended in
 * memory (self->added, a Python list populated on demand). */
99 98 static Py_ssize_t index_length(const indexObject *self)
100 99 {
101 100 if (self->added == NULL)
102 101 return self->length;
103 102 return self->length + PyList_GET_SIZE(self->added);
104 103 }
105 104
/* Shared singletons for the null revision; nullentry is presumably
 * built during module init — not visible in this chunk, TODO confirm. */
106 105 static PyObject *nullentry = NULL;
107 106 static const char nullid[20] = {0};
108 107 static const Py_ssize_t nullrev = -1;
109 108
110 109 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
111 110
112 111 static int index_find_node(indexObject *self, const char *node,
113 112 Py_ssize_t nodelen);
114 113
/* Py_BuildValue format for an index entry: 'K'/'k' (unsigned long long
 * vs unsigned long) chosen by the platform's long width; PY23 picks the
 * py2 's#' or py3 'y#' bytes code. */
115 114 #if LONG_MAX == 0x7fffffffL
116 115 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
117 116 #else
118 117 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
119 118 #endif
120 119
121 120 /* A RevlogNG v1 index entry is 64 bytes long. */
122 121 static const long v1_hdrsize = 64;
123 122
/* Set a mercurial.error.RevlogError as the pending Python exception.
 * Falls back to SystemError if the class cannot be resolved; callers
 * only care that *some* exception is set, not its message. */
124 123 static void raise_revlog_error(void)
125 124 {
126 125 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
127 126
128 127 mod = PyImport_ImportModule("mercurial.error");
129 128 if (mod == NULL) {
130 129 goto cleanup;
131 130 }
132 131
/* PyModule_GetDict returns a borrowed ref; INCREF so the single
 * cleanup path below can unconditionally DECREF. */
133 132 dict = PyModule_GetDict(mod);
134 133 if (dict == NULL) {
135 134 goto cleanup;
136 135 }
137 136 Py_INCREF(dict);
138 137
139 138 errclass = PyDict_GetItemString(dict, "RevlogError");
140 139 if (errclass == NULL) {
141 140 PyErr_SetString(PyExc_SystemError,
142 141 "could not find RevlogError");
143 142 goto cleanup;
144 143 }
145 144
146 145 /* value of exception is ignored by callers */
147 146 PyErr_SetString(errclass, "RevlogError");
148 147
149 148 cleanup:
150 149 Py_XDECREF(dict);
151 150 Py_XDECREF(mod);
152 151 }
153 152
154 153 /*
155 154 * Return a pointer to the beginning of a RevlogNG record.
156 155 */
157 156 /*
158 157 * Return a pointer to the beginning of a RevlogNG record.
159 158 */
/* For inlined revlogs (index + data interleaved) the per-rev offsets
 * table is built lazily by inline_scan on first access; for plain
 * revlogs each record sits at a fixed 64-byte stride. Returns NULL
 * with a Python exception set on failure. */
160 159 static const char *index_deref(indexObject *self, Py_ssize_t pos)
161 160 {
162 161 if (self->inlined && pos > 0) {
163 162 if (self->offsets == NULL) {
164 163 Py_ssize_t ret;
165 164 self->offsets = PyMem_Malloc(self->raw_length *
166 165 sizeof(*self->offsets));
167 166 if (self->offsets == NULL)
168 167 return (const char *)PyErr_NoMemory();
169 168 ret = inline_scan(self, self->offsets);
170 169 if (ret == -1) {
171 170 return NULL;
172 171 };
173 172 }
174 173 return self->offsets[pos];
175 174 }
176 175
177 176 return (const char *)(self->buf.buf) + pos * v1_hdrsize;
178 177 }
176 175
177 176 /*
178 177 * Get parents of the given rev.
179 178 *
180 179 * The specified rev must be valid and must not be nullrev. A returned
181 180 * parent revision may be nullrev, but is guaranteed to be in valid range.
182 181 */
/* Fill ps[0..1] with the two parent revs of `rev`. In-memory entries
 * (rev >= self->length) come from the `added` tuple (slots 5 and 6);
 * on-disk entries are read from bytes 24/28 of the raw record. The
 * range check guards against corrupted indexes. Returns 0 / -1. */
183 182 static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
184 183 int maxrev)
185 184 {
186 185 if (rev >= self->length) {
187 186 long tmp;
188 187 PyObject *tuple =
189 188 PyList_GET_ITEM(self->added, rev - self->length);
190 189 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 5), &tmp)) {
191 190 return -1;
192 191 }
193 192 ps[0] = (int)tmp;
194 193 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 6), &tmp)) {
195 194 return -1;
196 195 }
197 196 ps[1] = (int)tmp;
198 197 } else {
199 198 const char *data = index_deref(self, rev);
200 199 ps[0] = getbe32(data + 24);
201 200 ps[1] = getbe32(data + 28);
202 201 }
203 202 /* If index file is corrupted, ps[] may point to invalid revisions. So
204 203 * there is a risk of buffer overflow to trust them unconditionally. */
205 204 if (ps[0] < -1 || ps[0] > maxrev || ps[1] < -1 || ps[1] > maxrev) {
206 205 PyErr_SetString(PyExc_ValueError, "parent out of range");
207 206 return -1;
208 207 }
209 208 return 0;
210 209 }
211 210
212 211 /*
213 212 * Get parents of the given rev.
214 213 *
215 214 * If the specified rev is out of range, IndexError will be raised. If the
216 215 * revlog entry is corrupted, ValueError may be raised.
217 216 *
218 217 * Returns 0 on success or -1 on failure.
219 218 */
/* Public (capsule API) wrapper around index_get_parents: validates the
 * object type and rev range, and handles nullrev (-1 -> both parents
 * nullrev) before delegating. */
220 219 static int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps)
221 220 {
222 221 int tiprev;
223 222 if (!op || !HgRevlogIndex_Check(op) || !ps) {
224 223 PyErr_BadInternalCall();
225 224 return -1;
226 225 }
227 226 tiprev = (int)index_length((indexObject *)op) - 1;
228 227 if (rev < -1 || rev > tiprev) {
229 228 PyErr_Format(PyExc_IndexError, "rev out of range: %d", rev);
230 229 return -1;
231 230 } else if (rev == -1) {
232 231 ps[0] = ps[1] = -1;
233 232 return 0;
234 233 } else {
235 234 return index_get_parents((indexObject *)op, rev, ps, tiprev);
236 235 }
237 236 }
238 237
/* Return the data-file start offset of `rev` (-1 on error). The
 * on-disk field packs 48 bits of offset with 16 flag bits, hence the
 * final `>> 16`; entry 0 reuses the high bytes as the revlog version
 * number, which is masked off. */
239 238 static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
240 239 {
241 240 uint64_t offset;
242 241 if (rev == nullrev) {
243 242 return 0;
244 243 }
245 244 if (rev >= self->length) {
246 245 PyObject *tuple;
247 246 PyObject *pylong;
248 247 PY_LONG_LONG tmp;
249 248 tuple = PyList_GET_ITEM(self->added, rev - self->length);
250 249 pylong = PyTuple_GET_ITEM(tuple, 0);
251 250 tmp = PyLong_AsLongLong(pylong);
252 251 if (tmp == -1 && PyErr_Occurred()) {
253 252 return -1;
254 253 }
255 254 if (tmp < 0) {
256 255 PyErr_Format(PyExc_OverflowError,
257 256 "revlog entry size out of bound (%lld)",
258 257 (long long)tmp);
259 258 return -1;
260 259 }
261 260 offset = (uint64_t)tmp;
262 261 } else {
263 262 const char *data = index_deref(self, rev);
264 263 offset = getbe32(data + 4);
265 264 if (rev == 0) {
266 265 /* mask out version number for the first entry */
267 266 offset &= 0xFFFF;
268 267 } else {
269 268 uint32_t offset_high = getbe32(data);
270 269 offset |= ((uint64_t)offset_high) << 32;
271 270 }
272 271 }
273 272 return (int64_t)(offset >> 16);
274 273 }
275 274
/* Return the compressed length of `rev`'s data chunk (-1 on error).
 * Reads tuple slot 1 for in-memory entries, bytes 8..11 of the raw
 * record otherwise; both paths reject values outside [0, INT_MAX]. */
276 275 static inline int index_get_length(indexObject *self, Py_ssize_t rev)
277 276 {
278 277 if (rev == nullrev) {
279 278 return 0;
280 279 }
281 280 if (rev >= self->length) {
282 281 PyObject *tuple;
283 282 PyObject *pylong;
284 283 long ret;
285 284 tuple = PyList_GET_ITEM(self->added, rev - self->length);
286 285 pylong = PyTuple_GET_ITEM(tuple, 1);
287 286 ret = PyInt_AsLong(pylong);
288 287 if (ret == -1 && PyErr_Occurred()) {
289 288 return -1;
290 289 }
291 290 if (ret < 0 || ret > (long)INT_MAX) {
292 291 PyErr_Format(PyExc_OverflowError,
293 292 "revlog entry size out of bound (%ld)",
294 293 ret);
295 294 return -1;
296 295 }
297 296 return (int)ret;
298 297 } else {
299 298 const char *data = index_deref(self, rev);
300 299 int tmp = (int)getbe32(data + 8);
301 300 if (tmp < 0) {
302 301 PyErr_Format(PyExc_OverflowError,
303 302 "revlog entry size out of bound (%d)",
304 303 tmp);
305 304 return -1;
306 305 }
307 306 return tmp;
308 307 }
309 308 }
310 309
311 310 /*
312 311 * RevlogNG format (all in big endian, data may be inlined):
313 312 * 6 bytes: offset
314 313 * 2 bytes: flags
315 314 * 4 bytes: compressed length
316 315 * 4 bytes: uncompressed length
317 316 * 4 bytes: base revision
318 317 * 4 bytes: link revision
319 318 * 4 bytes: parent 1 revision
320 319 * 4 bytes: parent 2 revision
321 320 * 32 bytes: nodeid (only 20 bytes used)
322 321 */
/* __getitem__ for integer keys: build the 8-tuple for rev `pos`.
 * This is the function this changeset actually modifies: the old
 * per-rev tuple cache (old-column-only lines below) is deleted, and a
 * fresh tuple is now built on every call. */
323 322 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
324 323 {
325 324 uint64_t offset_flags;
326 325 int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
327 326 const char *c_node_id;
328 327 const char *data;
329 328 Py_ssize_t length = index_length(self);
330 PyObject *entry;
331 329
332 330 if (pos == nullrev) {
333 331 Py_INCREF(nullentry);
334 332 return nullentry;
335 333 }
336 334
337 335 if (pos < 0 || pos >= length) {
338 336 PyErr_SetString(PyExc_IndexError, "revlog index out of range");
339 337 return NULL;
340 338 }
341 339
/* In-memory entries are already tuples: return them directly. */
342 340 if (pos >= self->length) {
343 341 PyObject *obj;
344 342 obj = PyList_GET_ITEM(self->added, pos - self->length);
345 343 Py_INCREF(obj);
346 344 return obj;
347 345 }
348 346
/* --- removed by this change: lazy calloc'd cache of parsed tuples --- */
349 if (self->cache) {
350 if (self->cache[pos]) {
351 Py_INCREF(self->cache[pos]);
352 return self->cache[pos];
353 }
354 } else {
355 self->cache = calloc(self->raw_length, sizeof(PyObject *));
356 if (self->cache == NULL)
357 return PyErr_NoMemory();
358 }
359
360 347 data = index_deref(self, pos);
361 348 if (data == NULL)
362 349 return NULL;
363 350
364 351 offset_flags = getbe32(data + 4);
365 352 if (pos == 0) /* mask out version number for the first entry */
366 353 offset_flags &= 0xFFFF;
367 354 else {
368 355 uint32_t offset_high = getbe32(data);
369 356 offset_flags |= ((uint64_t)offset_high) << 32;
370 357 }
371 358
372 359 comp_len = getbe32(data + 8);
373 360 uncomp_len = getbe32(data + 12);
374 361 base_rev = getbe32(data + 16);
375 362 link_rev = getbe32(data + 20);
376 363 parent_1 = getbe32(data + 24);
377 364 parent_2 = getbe32(data + 28);
378 365 c_node_id = data + 32;
379 366
/* --- removed: build + GC-untrack + extra INCREF + store in cache --- */
380 entry = Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
381 base_rev, link_rev, parent_1, parent_2, c_node_id,
382 (Py_ssize_t)20);
383
384 if (entry) {
385 PyObject_GC_UnTrack(entry);
386 Py_INCREF(entry);
387 }
388
389 self->cache[pos] = entry;
390
391 return entry;
/* --- replacement: return a freshly built tuple each time --- */
367 return Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
368 base_rev, link_rev, parent_1, parent_2, c_node_id,
369 (Py_ssize_t)20);
392 370 }
393 371
394 372 /*
395 373 * Return the 20-byte SHA of the node corresponding to the given rev.
396 374 */
394 372 /*
395 373 * Return the 20-byte SHA of the node corresponding to the given rev.
396 374 */
/* Returns nullid for nullrev, NULL for out-of-range or lookup failure.
 * On-disk records keep the node at byte offset 32. */
397 375 static const char *index_node(indexObject *self, Py_ssize_t pos)
398 376 {
399 377 Py_ssize_t length = index_length(self);
400 378 const char *data;
401 379
402 380 if (pos == nullrev)
403 381 return nullid;
404 382
405 383 if (pos >= length)
406 384 return NULL;
407 385
408 386 if (pos >= self->length) {
409 387 PyObject *tuple, *str;
410 388 tuple = PyList_GET_ITEM(self->added, pos - self->length);
411 389 str = PyTuple_GetItem(tuple, 7);
412 390 return str ? PyBytes_AS_STRING(str) : NULL;
413 391 }
414 392
415 393 data = index_deref(self, pos);
416 394 return data ? data + 32 : NULL;
417 395 }
418 396
419 397 /*
420 398 * Return the 20-byte SHA of the node corresponding to the given rev. The
421 399 * rev is assumed to be existing. If not, an exception is set.
422 400 */
420 398 /*
421 399 * Return the 20-byte SHA of the node corresponding to the given rev. The
422 400 * rev is assumed to be existing. If not, an exception is set.
423 401 */
424 402 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
425 403 {
426 404 const char *node = index_node(self, pos);
427 405 if (node == NULL) {
428 406 PyErr_Format(PyExc_IndexError, "could not access rev %d",
429 407 (int)pos);
430 408 }
431 409 return node;
432 410 }
432 410
433 411 static int nt_insert(nodetree *self, const char *node, int rev);
434 412
/* Extract the raw pointer of a 20-byte node from a Python bytes object
 * into *node. Returns 0 on success, -1 (with exception set) if the
 * object is not bytes or has the wrong length. */
435 413 static int node_check(PyObject *obj, char **node)
436 414 {
437 415 Py_ssize_t nodelen;
438 416 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
439 417 return -1;
440 418 if (nodelen == 20)
441 419 return 0;
442 420 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
443 421 return -1;
444 422 }
445 423
/* Append an 8-tuple entry to the in-memory tail of the index; also
 * registers the new node in the trie (if built) and invalidates the
 * cached headrevs list. Returns None or NULL on error. */
446 424 static PyObject *index_append(indexObject *self, PyObject *obj)
447 425 {
448 426 char *node;
449 427 Py_ssize_t len;
450 428
451 429 if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
452 430 PyErr_SetString(PyExc_TypeError, "8-tuple required");
453 431 return NULL;
454 432 }
455 433
456 434 if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
457 435 return NULL;
458 436
/* The new entry's rev number is the current length. */
459 437 len = index_length(self);
460 438
461 439 if (self->added == NULL) {
462 440 self->added = PyList_New(0);
463 441 if (self->added == NULL)
464 442 return NULL;
465 443 }
466 444
467 445 if (PyList_Append(self->added, obj) == -1)
468 446 return NULL;
469 447
470 448 if (self->ntinitialized)
471 449 nt_insert(&self->nt, node, (int)len);
472 450
473 451 Py_CLEAR(self->headrevs);
474 452 Py_RETURN_NONE;
475 453 }
476 454
/* Build a dict of diagnostic counters (bytes keys -> ints) describing
 * the index and its node trie. The istat macro reuses s/t so a single
 * bail label can release whichever pair was live on failure. */
477 455 static PyObject *index_stats(indexObject *self)
478 456 {
479 457 PyObject *obj = PyDict_New();
480 458 PyObject *s = NULL;
481 459 PyObject *t = NULL;
482 460
483 461 if (obj == NULL)
484 462 return NULL;
485 463
486 464 #define istat(__n, __d) \
487 465 do { \
488 466 s = PyBytes_FromString(__d); \
489 467 t = PyInt_FromSsize_t(self->__n); \
490 468 if (!s || !t) \
491 469 goto bail; \
492 470 if (PyDict_SetItem(obj, s, t) == -1) \
493 471 goto bail; \
494 472 Py_CLEAR(s); \
495 473 Py_CLEAR(t); \
496 474 } while (0)
497 475
498 476 if (self->added) {
499 477 Py_ssize_t len = PyList_GET_SIZE(self->added);
500 478 s = PyBytes_FromString("index entries added");
501 479 t = PyInt_FromSsize_t(len);
502 480 if (!s || !t)
503 481 goto bail;
504 482 if (PyDict_SetItem(obj, s, t) == -1)
505 483 goto bail;
506 484 Py_CLEAR(s);
507 485 Py_CLEAR(t);
508 486 }
509 487
510 488 if (self->raw_length != self->length)
511 489 istat(raw_length, "revs on disk");
512 490 istat(length, "revs in memory");
513 491 istat(ntlookups, "node trie lookups");
514 492 istat(ntmisses, "node trie misses");
515 493 istat(ntrev, "node trie last rev scanned");
516 494 if (self->ntinitialized) {
517 495 istat(nt.capacity, "node trie capacity");
518 496 istat(nt.depth, "node trie depth");
519 497 istat(nt.length, "node trie count");
520 498 istat(nt.splits, "node trie splits");
521 499 }
522 500
523 501 #undef istat
524 502
525 503 return obj;
526 504
527 505 bail:
528 506 Py_XDECREF(obj);
529 507 Py_XDECREF(s);
530 508 Py_XDECREF(t);
531 509 return NULL;
532 510 }
533 511
534 512 /*
535 513 * When we cache a list, we want to be sure the caller can't mutate
536 514 * the cached copy.
537 515 */
534 512 /*
535 513 * When we cache a list, we want to be sure the caller can't mutate
536 514 * the cached copy.
537 515 */
/* Shallow copy: the new list shares (INCREF'd) element references. */
538 516 static PyObject *list_copy(PyObject *list)
539 517 {
540 518 Py_ssize_t len = PyList_GET_SIZE(list);
541 519 PyObject *newlist = PyList_New(len);
542 520 Py_ssize_t i;
543 521
544 522 if (newlist == NULL)
545 523 return NULL;
546 524
547 525 for (i = 0; i < len; i++) {
548 526 PyObject *obj = PyList_GET_ITEM(list, i);
549 527 Py_INCREF(obj);
550 528 PyList_SET_ITEM(newlist, i, obj);
551 529 }
552 530
553 531 return newlist;
554 532 }
555 533
/* Ask the Python-level filter callable whether rev `arg` is filtered.
 * Returns 1/0 like PyObject_IsTrue, -1 on error; a NULL filter means
 * "nothing is filtered". */
556 534 static int check_filter(PyObject *filter, Py_ssize_t arg)
557 535 {
558 536 if (filter) {
559 537 PyObject *arglist, *result;
560 538 int isfiltered;
561 539
562 540 arglist = Py_BuildValue("(n)", arg);
563 541 if (!arglist) {
564 542 return -1;
565 543 }
566 544
567 545 result = PyEval_CallObject(filter, arglist);
568 546 Py_DECREF(arglist);
569 547 if (!result) {
570 548 return -1;
571 549 }
572 550
573 551 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
574 552 * same as this function, so we can just return it directly.*/
575 553 isfiltered = PyObject_IsTrue(result);
576 554 Py_DECREF(result);
577 555 return isfiltered;
578 556 } else {
579 557 return 0;
580 558 }
581 559 }
582 560
/* Propagate phases downward: a rev's phase is at least the maximum of
 * its parents' phases (higher number = more private). */
583 561 static inline void set_phase_from_parents(char *phases, int parent_1,
584 562 int parent_2, Py_ssize_t i)
585 563 {
586 564 if (parent_1 >= 0 && phases[parent_1] > phases[i])
587 565 phases[i] = phases[parent_1];
588 566 if (parent_2 >= 0 && phases[parent_2] > phases[i])
589 567 phases[i] = phases[parent_2];
590 568 }
591 569
/* revset helper: given heads and candidate roots, return the roots
 * reachable from the heads (walking parent links down to minroot); if
 * includepath is true, also return every rev on a path between a
 * reachable root and a head. All revstates indexing is shifted by +1
 * so nullrev (-1) gets slot 0. */
592 570 static PyObject *reachableroots2(indexObject *self, PyObject *args)
593 571 {
594 572
595 573 /* Input */
596 574 long minroot;
597 575 PyObject *includepatharg = NULL;
598 576 int includepath = 0;
599 577 /* heads and roots are lists */
600 578 PyObject *heads = NULL;
601 579 PyObject *roots = NULL;
602 580 PyObject *reachable = NULL;
603 581
604 582 PyObject *val;
605 583 Py_ssize_t len = index_length(self);
606 584 long revnum;
607 585 Py_ssize_t k;
608 586 Py_ssize_t i;
609 587 Py_ssize_t l;
610 588 int r;
611 589 int parents[2];
612 590
613 591 /* Internal data structure:
614 592 * tovisit: array of length len+1 (all revs + nullrev), filled upto
615 593 * lentovisit
616 594 *
617 595 * revstates: array of length len+1 (all revs + nullrev) */
618 596 int *tovisit = NULL;
619 597 long lentovisit = 0;
620 598 enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
621 599 char *revstates = NULL;
622 600
623 601 /* Get arguments */
624 602 if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
625 603 &PyList_Type, &roots, &PyBool_Type,
626 604 &includepatharg))
627 605 goto bail;
628 606
629 607 if (includepatharg == Py_True)
630 608 includepath = 1;
631 609
632 610 /* Initialize return set */
633 611 reachable = PyList_New(0);
634 612 if (reachable == NULL)
635 613 goto bail;
636 614
637 615 /* Initialize internal datastructures */
638 616 tovisit = (int *)malloc((len + 1) * sizeof(int));
639 617 if (tovisit == NULL) {
640 618 PyErr_NoMemory();
641 619 goto bail;
642 620 }
643 621
644 622 revstates = (char *)calloc(len + 1, 1);
645 623 if (revstates == NULL) {
646 624 PyErr_NoMemory();
647 625 goto bail;
648 626 }
649 627
/* Mark the candidate roots. */
650 628 l = PyList_GET_SIZE(roots);
651 629 for (i = 0; i < l; i++) {
652 630 revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
653 631 if (revnum == -1 && PyErr_Occurred())
654 632 goto bail;
655 633 /* If root is out of range, e.g. wdir(), it must be unreachable
656 634 * from heads. So we can just ignore it. */
657 635 if (revnum + 1 < 0 || revnum + 1 >= len + 1)
658 636 continue;
659 637 revstates[revnum + 1] |= RS_ROOT;
660 638 }
661 639
662 640 /* Populate tovisit with all the heads */
663 641 l = PyList_GET_SIZE(heads);
664 642 for (i = 0; i < l; i++) {
665 643 revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
666 644 if (revnum == -1 && PyErr_Occurred())
667 645 goto bail;
668 646 if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
669 647 PyErr_SetString(PyExc_IndexError, "head out of range");
670 648 goto bail;
671 649 }
672 650 if (!(revstates[revnum + 1] & RS_SEEN)) {
673 651 tovisit[lentovisit++] = (int)revnum;
674 652 revstates[revnum + 1] |= RS_SEEN;
675 653 }
676 654 }
677 655
678 656 /* Visit the tovisit list and find the reachable roots */
679 657 k = 0;
680 658 while (k < lentovisit) {
681 659 /* Add the node to reachable if it is a root*/
682 660 revnum = tovisit[k++];
683 661 if (revstates[revnum + 1] & RS_ROOT) {
684 662 revstates[revnum + 1] |= RS_REACHABLE;
685 663 val = PyInt_FromLong(revnum);
686 664 if (val == NULL)
687 665 goto bail;
688 666 r = PyList_Append(reachable, val);
689 667 Py_DECREF(val);
690 668 if (r < 0)
691 669 goto bail;
692 670 if (includepath == 0)
693 671 continue;
694 672 }
695 673
696 674 /* Add its parents to the list of nodes to visit */
697 675 if (revnum == nullrev)
698 676 continue;
699 677 r = index_get_parents(self, revnum, parents, (int)len - 1);
700 678 if (r < 0)
701 679 goto bail;
702 680 for (i = 0; i < 2; i++) {
703 681 if (!(revstates[parents[i] + 1] & RS_SEEN) &&
704 682 parents[i] >= minroot) {
705 683 tovisit[lentovisit++] = parents[i];
706 684 revstates[parents[i] + 1] |= RS_SEEN;
707 685 }
708 686 }
709 687 }
710 688
711 689 /* Find all the nodes in between the roots we found and the heads
712 690 * and add them to the reachable set */
/* Second pass: walk upward in topological (rev-number) order; a seen
 * rev with a reachable parent is itself on a root->head path. */
713 691 if (includepath == 1) {
714 692 long minidx = minroot;
715 693 if (minidx < 0)
716 694 minidx = 0;
717 695 for (i = minidx; i < len; i++) {
718 696 if (!(revstates[i + 1] & RS_SEEN))
719 697 continue;
720 698 r = index_get_parents(self, i, parents, (int)len - 1);
721 699 /* Corrupted index file, error is set from
722 700 * index_get_parents */
723 701 if (r < 0)
724 702 goto bail;
725 703 if (((revstates[parents[0] + 1] |
726 704 revstates[parents[1] + 1]) &
727 705 RS_REACHABLE) &&
728 706 !(revstates[i + 1] & RS_REACHABLE)) {
729 707 revstates[i + 1] |= RS_REACHABLE;
730 708 val = PyInt_FromSsize_t(i);
731 709 if (val == NULL)
732 710 goto bail;
733 711 r = PyList_Append(reachable, val);
734 712 Py_DECREF(val);
735 713 if (r < 0)
736 714 goto bail;
737 715 }
738 716 }
739 717 }
740 718
741 719 free(revstates);
742 720 free(tovisit);
743 721 return reachable;
744 722 bail:
745 723 Py_XDECREF(reachable);
746 724 free(revstates);
747 725 free(tovisit);
748 726 return NULL;
749 727 }
750 728
/* Stamp `phase` into phases[] for every node in the `roots` set and
 * return the smallest such rev (-1 if none, -2 on error). Nodes that
 * don't resolve to a valid in-range rev are treated as errors. */
751 729 static int add_roots_get_min(indexObject *self, PyObject *roots, char *phases,
752 730 char phase)
753 731 {
754 732 Py_ssize_t len = index_length(self);
755 733 PyObject *item;
756 734 PyObject *iterator;
757 735 int rev, minrev = -1;
758 736 char *node;
759 737
760 738 if (!PySet_Check(roots)) {
761 739 PyErr_SetString(PyExc_TypeError,
762 740 "roots must be a set of nodes");
763 741 return -2;
764 742 }
765 743 iterator = PyObject_GetIter(roots);
766 744 if (iterator == NULL)
767 745 return -2;
768 746 while ((item = PyIter_Next(iterator))) {
769 747 if (node_check(item, &node) == -1)
770 748 goto failed;
771 749 rev = index_find_node(self, node, 20);
772 750 /* null is implicitly public, so negative is invalid */
773 751 if (rev < 0 || rev >= len)
774 752 goto failed;
775 753 phases[rev] = phase;
776 754 if (minrev == -1 || minrev > rev)
777 755 minrev = rev;
778 756 Py_DECREF(item);
779 757 }
780 758 Py_DECREF(iterator);
781 759 return minrev;
782 760 failed:
783 761 Py_DECREF(iterator);
784 762 Py_DECREF(item);
785 763 return -2;
786 764 }
787 765
/* Compute, for each tracked non-public phase, the full set of revs in
 * that phase: seed phases[] from the phase-roots dict, then propagate
 * to descendants via set_phase_from_parents. Returns (len, {phase ->
 * set(revs)}) or NULL on error. */
788 766 static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
789 767 {
790 768 /* 0: public (untracked), 1: draft, 2: secret, 32: archive,
791 769 96: internal */
792 770 static const char trackedphases[] = {1, 2, 32, 96};
793 771 PyObject *roots = Py_None;
794 772 PyObject *phasesetsdict = NULL;
795 773 PyObject *phasesets[4] = {NULL, NULL, NULL, NULL};
796 774 Py_ssize_t len = index_length(self);
797 775 char *phases = NULL;
798 776 int minphaserev = -1, rev, i;
799 777 const int numphases = (int)(sizeof(phasesets) / sizeof(phasesets[0]));
800 778
801 779 if (!PyArg_ParseTuple(args, "O", &roots))
802 780 return NULL;
803 781 if (roots == NULL || !PyDict_Check(roots)) {
804 782 PyErr_SetString(PyExc_TypeError, "roots must be a dictionary");
805 783 return NULL;
806 784 }
807 785
/* One phase byte per rev; calloc => everything starts public (0). */
808 786 phases = calloc(len, 1);
809 787 if (phases == NULL) {
810 788 PyErr_NoMemory();
811 789 return NULL;
812 790 }
813 791
814 792 for (i = 0; i < numphases; ++i) {
815 793 PyObject *pyphase = PyInt_FromLong(trackedphases[i]);
816 794 PyObject *phaseroots = NULL;
817 795 if (pyphase == NULL)
818 796 goto release;
819 797 phaseroots = PyDict_GetItem(roots, pyphase);
820 798 Py_DECREF(pyphase);
821 799 if (phaseroots == NULL)
822 800 continue;
823 801 rev = add_roots_get_min(self, phaseroots, phases,
824 802 trackedphases[i]);
825 803 if (rev == -2)
826 804 goto release;
827 805 if (rev != -1 && (minphaserev == -1 || rev < minphaserev))
828 806 minphaserev = rev;
829 807 }
830 808
831 809 for (i = 0; i < numphases; ++i) {
832 810 phasesets[i] = PySet_New(NULL);
833 811 if (phasesets[i] == NULL)
834 812 goto release;
835 813 }
836 814
/* No tracked roots at all: skip the propagation loop entirely. */
837 815 if (minphaserev == -1)
838 816 minphaserev = len;
839 817 for (rev = minphaserev; rev < len; ++rev) {
840 818 PyObject *pyphase = NULL;
841 819 PyObject *pyrev = NULL;
842 820 int parents[2];
843 821 /*
844 822 * The parent lookup could be skipped for phaseroots, but
845 823 * phase --force would historically not recompute them
846 824 * correctly, leaving descendents with a lower phase around.
847 825 * As such, unconditionally recompute the phase.
848 826 */
849 827 if (index_get_parents(self, rev, parents, (int)len - 1) < 0)
850 828 goto release;
851 829 set_phase_from_parents(phases, parents[0], parents[1], rev);
852 830 switch (phases[rev]) {
853 831 case 0:
854 832 continue;
855 833 case 1:
856 834 pyphase = phasesets[0];
857 835 break;
858 836 case 2:
859 837 pyphase = phasesets[1];
860 838 break;
861 839 case 32:
862 840 pyphase = phasesets[2];
863 841 break;
864 842 case 96:
865 843 pyphase = phasesets[3];
866 844 break;
867 845 default:
868 846 /* this should never happen since the phase number is
869 847 * specified by this function. */
870 848 PyErr_SetString(PyExc_SystemError,
871 849 "bad phase number in internal list");
872 850 goto release;
873 851 }
874 852 pyrev = PyInt_FromLong(rev);
875 853 if (pyrev == NULL)
876 854 goto release;
877 855 if (PySet_Add(pyphase, pyrev) == -1) {
878 856 Py_DECREF(pyrev);
879 857 goto release;
880 858 }
881 859 Py_DECREF(pyrev);
882 860 }
883 861
884 862 phasesetsdict = _dict_new_presized(numphases);
885 863 if (phasesetsdict == NULL)
886 864 goto release;
887 865 for (i = 0; i < numphases; ++i) {
888 866 PyObject *pyphase = PyInt_FromLong(trackedphases[i]);
889 867 if (pyphase == NULL)
890 868 goto release;
891 869 if (PyDict_SetItem(phasesetsdict, pyphase, phasesets[i]) ==
892 870 -1) {
893 871 Py_DECREF(pyphase);
894 872 goto release;
895 873 }
896 874 Py_DECREF(phasesets[i]);
897 875 phasesets[i] = NULL;
898 876 }
899 877
900 878 return Py_BuildValue("nN", len, phasesetsdict);
901 879
902 880 release:
903 881 for (i = 0; i < numphases; ++i)
904 882 Py_XDECREF(phasesets[i]);
905 883 Py_XDECREF(phasesetsdict);
906 884
907 885 free(phases);
908 886 return NULL;
909 887 }
910 888
/* Return the list of head revs (revs with no unfiltered children),
 * honoring an optional filteredrevs container. The result is cached on
 * self->headrevs (keyed by the filteredrevs identity) and always
 * handed out as a copy so callers cannot mutate the cache. */
911 889 static PyObject *index_headrevs(indexObject *self, PyObject *args)
912 890 {
913 891 Py_ssize_t i, j, len;
914 892 char *nothead = NULL;
915 893 PyObject *heads = NULL;
916 894 PyObject *filter = NULL;
917 895 PyObject *filteredrevs = Py_None;
918 896
919 897 if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
920 898 return NULL;
921 899 }
922 900
923 901 if (self->headrevs && filteredrevs == self->filteredrevs)
924 902 return list_copy(self->headrevs);
925 903
926 904 Py_DECREF(self->filteredrevs);
927 905 self->filteredrevs = filteredrevs;
928 906 Py_INCREF(filteredrevs);
929 907
930 908 if (filteredrevs != Py_None) {
931 909 filter = PyObject_GetAttrString(filteredrevs, "__contains__");
932 910 if (!filter) {
933 911 PyErr_SetString(
934 912 PyExc_TypeError,
935 913 "filteredrevs has no attribute __contains__");
936 914 goto bail;
937 915 }
938 916 }
939 917
940 918 len = index_length(self);
941 919 heads = PyList_New(0);
942 920 if (heads == NULL)
943 921 goto bail;
/* Empty repo: the only head is nullrev. */
944 922 if (len == 0) {
945 923 PyObject *nullid = PyInt_FromLong(-1);
946 924 if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
947 925 Py_XDECREF(nullid);
948 926 goto bail;
949 927 }
950 928 goto done;
951 929 }
952 930
953 931 nothead = calloc(len, 1);
954 932 if (nothead == NULL) {
955 933 PyErr_NoMemory();
956 934 goto bail;
957 935 }
958 936
959 937 for (i = len - 1; i >= 0; i--) {
960 938 int isfiltered;
961 939 int parents[2];
962 940
963 941 /* If nothead[i] == 1, it means we've seen an unfiltered child
964 942 * of this node already, and therefore this node is not
965 943 * filtered. So we can skip the expensive check_filter step.
966 944 */
967 945 if (nothead[i] != 1) {
968 946 isfiltered = check_filter(filter, i);
969 947 if (isfiltered == -1) {
970 948 PyErr_SetString(PyExc_TypeError,
971 949 "unable to check filter");
972 950 goto bail;
973 951 }
974 952
975 953 if (isfiltered) {
976 954 nothead[i] = 1;
977 955 continue;
978 956 }
979 957 }
980 958
981 959 if (index_get_parents(self, i, parents, (int)len - 1) < 0)
982 960 goto bail;
983 961 for (j = 0; j < 2; j++) {
984 962 if (parents[j] >= 0)
985 963 nothead[parents[j]] = 1;
986 964 }
987 965 }
988 966
989 967 for (i = 0; i < len; i++) {
990 968 PyObject *head;
991 969
992 970 if (nothead[i])
993 971 continue;
994 972 head = PyInt_FromSsize_t(i);
995 973 if (head == NULL || PyList_Append(heads, head) == -1) {
996 974 Py_XDECREF(head);
997 975 goto bail;
998 976 }
999 977 }
1000 978
1001 979 done:
1002 980 self->headrevs = heads;
1003 981 Py_XDECREF(filter);
1004 982 free(nothead);
1005 983 return list_copy(self->headrevs);
1006 984 bail:
1007 985 Py_XDECREF(filter);
1008 986 Py_XDECREF(heads);
1009 987 free(nothead);
1010 988 return NULL;
1011 989 }
1012 990
1013 991 /**
1014 992 * Obtain the base revision index entry.
1015 993 *
1016 994 * Callers must ensure that rev >= 0 or illegal memory access may occur.
1017 995 */
1014 992 /**
1015 993 * Obtain the base revision index entry.
1016 994 *
1017 995 * Callers must ensure that rev >= 0 or illegal memory access may occur.
1018 996 */
/* Returns the delta-base rev (tuple slot 3 / raw bytes 16..19), or -2
 * with an exception set; rejects bases above rev or below -1 as
 * corruption. */
1019 997 static inline int index_baserev(indexObject *self, int rev)
1020 998 {
1021 999 const char *data;
1022 1000 int result;
1023 1001
1024 1002 if (rev >= self->length) {
1025 1003 PyObject *tuple =
1026 1004 PyList_GET_ITEM(self->added, rev - self->length);
1027 1005 long ret;
1028 1006 if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 3), &ret)) {
1029 1007 return -2;
1030 1008 }
1031 1009 result = (int)ret;
1032 1010 } else {
1033 1011 data = index_deref(self, rev);
1034 1012 if (data == NULL) {
1035 1013 return -2;
1036 1014 }
1037 1015
1038 1016 result = getbe32(data + 16);
1039 1017 }
1040 1018 if (result > rev) {
1041 1019 PyErr_Format(
1042 1020 PyExc_ValueError,
1043 1021 "corrupted revlog, revision base above revision: %d, %d",
1044 1022 rev, result);
1045 1023 return -2;
1046 1024 }
1047 1025 if (result < -1) {
1048 1026 PyErr_Format(
1049 1027 PyExc_ValueError,
1050 1028 "corrupted revlog, revision base out of range: %d, %d", rev,
1051 1029 result);
1052 1030 return -2;
1053 1031 }
1054 1032 return result;
1055 1033 }
1055 1033
/**
 * Find if a revision is a snapshot or not
 *
 * Only relevant for sparse-revlog case.
 * Callers must ensure that rev is in a valid range.
 *
 * Returns 1 if `rev` is a snapshot, 0 if it is a regular delta, and -1
 * on error (exception set).
 */
static int index_issnapshotrev(indexObject *self, Py_ssize_t rev)
{
	int ps[2];
	Py_ssize_t base;
	while (rev >= 0) {
		base = (Py_ssize_t)index_baserev(self, rev);
		if (base == rev) {
			/* self-referencing base: treat as a full snapshot */
			base = -1;
		}
		if (base == -2) {
			assert(PyErr_Occurred());
			return -1;
		}
		if (base == -1) {
			return 1;
		}
		if (index_get_parents(self, rev, ps, (int)rev) < 0) {
			assert(PyErr_Occurred());
			return -1;
		};
		/* a delta against one of its parents is a plain delta, not a
		 * snapshot */
		if (base == ps[0] || base == ps[1]) {
			return 0;
		}
		/* otherwise, walk down to the base and re-check */
		rev = base;
	}
	return rev == -1;
}
1089 1067
1090 1068 static PyObject *index_issnapshot(indexObject *self, PyObject *value)
1091 1069 {
1092 1070 long rev;
1093 1071 int issnap;
1094 1072 Py_ssize_t length = index_length(self);
1095 1073
1096 1074 if (!pylong_to_long(value, &rev)) {
1097 1075 return NULL;
1098 1076 }
1099 1077 if (rev < -1 || rev >= length) {
1100 1078 PyErr_Format(PyExc_ValueError, "revlog index out of range: %ld",
1101 1079 rev);
1102 1080 return NULL;
1103 1081 };
1104 1082 issnap = index_issnapshotrev(self, (Py_ssize_t)rev);
1105 1083 if (issnap < 0) {
1106 1084 return NULL;
1107 1085 };
1108 1086 return PyBool_FromLong((long)issnap);
1109 1087 }
1110 1088
1111 1089 static PyObject *index_findsnapshots(indexObject *self, PyObject *args)
1112 1090 {
1113 1091 Py_ssize_t start_rev;
1114 1092 PyObject *cache;
1115 1093 Py_ssize_t base;
1116 1094 Py_ssize_t rev;
1117 1095 PyObject *key = NULL;
1118 1096 PyObject *value = NULL;
1119 1097 const Py_ssize_t length = index_length(self);
1120 1098 if (!PyArg_ParseTuple(args, "O!n", &PyDict_Type, &cache, &start_rev)) {
1121 1099 return NULL;
1122 1100 }
1123 1101 for (rev = start_rev; rev < length; rev++) {
1124 1102 int issnap;
1125 1103 PyObject *allvalues = NULL;
1126 1104 issnap = index_issnapshotrev(self, rev);
1127 1105 if (issnap < 0) {
1128 1106 goto bail;
1129 1107 }
1130 1108 if (issnap == 0) {
1131 1109 continue;
1132 1110 }
1133 1111 base = (Py_ssize_t)index_baserev(self, rev);
1134 1112 if (base == rev) {
1135 1113 base = -1;
1136 1114 }
1137 1115 if (base == -2) {
1138 1116 assert(PyErr_Occurred());
1139 1117 goto bail;
1140 1118 }
1141 1119 key = PyInt_FromSsize_t(base);
1142 1120 allvalues = PyDict_GetItem(cache, key);
1143 1121 if (allvalues == NULL && PyErr_Occurred()) {
1144 1122 goto bail;
1145 1123 }
1146 1124 if (allvalues == NULL) {
1147 1125 int r;
1148 1126 allvalues = PyList_New(0);
1149 1127 if (!allvalues) {
1150 1128 goto bail;
1151 1129 }
1152 1130 r = PyDict_SetItem(cache, key, allvalues);
1153 1131 Py_DECREF(allvalues);
1154 1132 if (r < 0) {
1155 1133 goto bail;
1156 1134 }
1157 1135 }
1158 1136 value = PyInt_FromSsize_t(rev);
1159 1137 if (PyList_Append(allvalues, value)) {
1160 1138 goto bail;
1161 1139 }
1162 1140 Py_CLEAR(key);
1163 1141 Py_CLEAR(value);
1164 1142 }
1165 1143 Py_RETURN_NONE;
1166 1144 bail:
1167 1145 Py_XDECREF(key);
1168 1146 Py_XDECREF(value);
1169 1147 return NULL;
1170 1148 }
1171 1149
1172 1150 static PyObject *index_deltachain(indexObject *self, PyObject *args)
1173 1151 {
1174 1152 int rev, generaldelta;
1175 1153 PyObject *stoparg;
1176 1154 int stoprev, iterrev, baserev = -1;
1177 1155 int stopped;
1178 1156 PyObject *chain = NULL, *result = NULL;
1179 1157 const Py_ssize_t length = index_length(self);
1180 1158
1181 1159 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
1182 1160 return NULL;
1183 1161 }
1184 1162
1185 1163 if (PyInt_Check(stoparg)) {
1186 1164 stoprev = (int)PyInt_AsLong(stoparg);
1187 1165 if (stoprev == -1 && PyErr_Occurred()) {
1188 1166 return NULL;
1189 1167 }
1190 1168 } else if (stoparg == Py_None) {
1191 1169 stoprev = -2;
1192 1170 } else {
1193 1171 PyErr_SetString(PyExc_ValueError,
1194 1172 "stoprev must be integer or None");
1195 1173 return NULL;
1196 1174 }
1197 1175
1198 1176 if (rev < 0 || rev >= length) {
1199 1177 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1200 1178 return NULL;
1201 1179 }
1202 1180
1203 1181 chain = PyList_New(0);
1204 1182 if (chain == NULL) {
1205 1183 return NULL;
1206 1184 }
1207 1185
1208 1186 baserev = index_baserev(self, rev);
1209 1187
1210 1188 /* This should never happen. */
1211 1189 if (baserev <= -2) {
1212 1190 /* Error should be set by index_deref() */
1213 1191 assert(PyErr_Occurred());
1214 1192 goto bail;
1215 1193 }
1216 1194
1217 1195 iterrev = rev;
1218 1196
1219 1197 while (iterrev != baserev && iterrev != stoprev) {
1220 1198 PyObject *value = PyInt_FromLong(iterrev);
1221 1199 if (value == NULL) {
1222 1200 goto bail;
1223 1201 }
1224 1202 if (PyList_Append(chain, value)) {
1225 1203 Py_DECREF(value);
1226 1204 goto bail;
1227 1205 }
1228 1206 Py_DECREF(value);
1229 1207
1230 1208 if (generaldelta) {
1231 1209 iterrev = baserev;
1232 1210 } else {
1233 1211 iterrev--;
1234 1212 }
1235 1213
1236 1214 if (iterrev < 0) {
1237 1215 break;
1238 1216 }
1239 1217
1240 1218 if (iterrev >= length) {
1241 1219 PyErr_SetString(PyExc_IndexError,
1242 1220 "revision outside index");
1243 1221 return NULL;
1244 1222 }
1245 1223
1246 1224 baserev = index_baserev(self, iterrev);
1247 1225
1248 1226 /* This should never happen. */
1249 1227 if (baserev <= -2) {
1250 1228 /* Error should be set by index_deref() */
1251 1229 assert(PyErr_Occurred());
1252 1230 goto bail;
1253 1231 }
1254 1232 }
1255 1233
1256 1234 if (iterrev == stoprev) {
1257 1235 stopped = 1;
1258 1236 } else {
1259 1237 PyObject *value = PyInt_FromLong(iterrev);
1260 1238 if (value == NULL) {
1261 1239 goto bail;
1262 1240 }
1263 1241 if (PyList_Append(chain, value)) {
1264 1242 Py_DECREF(value);
1265 1243 goto bail;
1266 1244 }
1267 1245 Py_DECREF(value);
1268 1246
1269 1247 stopped = 0;
1270 1248 }
1271 1249
1272 1250 if (PyList_Reverse(chain)) {
1273 1251 goto bail;
1274 1252 }
1275 1253
1276 1254 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
1277 1255 Py_DECREF(chain);
1278 1256 return result;
1279 1257
1280 1258 bail:
1281 1259 Py_DECREF(chain);
1282 1260 return NULL;
1283 1261 }
1284 1262
/*
 * Compute the on-disk byte span covered by revisions start_rev..end_rev:
 * the distance from the first byte of start_rev to the last byte of
 * end_rev. Returns -1 on error (exception set).
 */
static inline int64_t
index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
{
	int64_t start_offset;
	int64_t end_offset;
	int end_size;
	start_offset = index_get_start(self, start_rev);
	if (start_offset < 0) {
		return -1;
	}
	end_offset = index_get_start(self, end_rev);
	if (end_offset < 0) {
		return -1;
	}
	end_size = index_get_length(self, end_rev);
	if (end_size < 0) {
		return -1;
	}
	/* offsets must be monotonic for a well-formed revlog */
	if (end_offset < start_offset) {
		PyErr_Format(PyExc_ValueError,
		             "corrupted revlog index: inconsistent offset "
		             "between revisions (%zd) and (%zd)",
		             start_rev, end_rev);
		return -1;
	}
	return (end_offset - start_offset) + (int64_t)end_size;
}
1312 1290
1313 1291 /* returns endidx so that revs[startidx:endidx] has no empty trailing revs */
1314 1292 static Py_ssize_t trim_endidx(indexObject *self, const Py_ssize_t *revs,
1315 1293 Py_ssize_t startidx, Py_ssize_t endidx)
1316 1294 {
1317 1295 int length;
1318 1296 while (endidx > 1 && endidx > startidx) {
1319 1297 length = index_get_length(self, revs[endidx - 1]);
1320 1298 if (length < 0) {
1321 1299 return -1;
1322 1300 }
1323 1301 if (length != 0) {
1324 1302 break;
1325 1303 }
1326 1304 endidx -= 1;
1327 1305 }
1328 1306 return endidx;
1329 1307 }
1330 1308
/* A gap (unread byte span) between two revisions of a delta chain. */
struct Gap {
	int64_t size;   /* width of the gap in bytes */
	Py_ssize_t idx; /* position (in the revs array) right after the gap */
};
1335 1313
1336 1314 static int gap_compare(const void *left, const void *right)
1337 1315 {
1338 1316 const struct Gap *l_left = ((const struct Gap *)left);
1339 1317 const struct Gap *l_right = ((const struct Gap *)right);
1340 1318 if (l_left->size < l_right->size) {
1341 1319 return -1;
1342 1320 } else if (l_left->size > l_right->size) {
1343 1321 return 1;
1344 1322 }
1345 1323 return 0;
1346 1324 }
1347 1325 static int Py_ssize_t_compare(const void *left, const void *right)
1348 1326 {
1349 1327 const Py_ssize_t l_left = *(const Py_ssize_t *)left;
1350 1328 const Py_ssize_t l_right = *(const Py_ssize_t *)right;
1351 1329 if (l_left < l_right) {
1352 1330 return -1;
1353 1331 } else if (l_left > l_right) {
1354 1332 return 1;
1355 1333 }
1356 1334 return 0;
1357 1335 }
1358 1336
/*
 * slicechunktodensity(revs, targetdensity, mingapsize) -> list of lists
 *
 * Slice the delta chain `revs` into contiguous chunks so that the ratio
 * of payload data to bytes read (the "density") reaches `targetdensity`,
 * skipping over gaps larger than `mingapsize`. Returns the original list
 * wrapped in a 1-tuple when no slicing is needed, otherwise a list of
 * slices of `revs`.
 */
static PyObject *index_slicechunktodensity(indexObject *self, PyObject *args)
{
	/* method arguments */
	PyObject *list_revs = NULL; /* revisions in the chain */
	double targetdensity = 0;   /* min density to achieve */
	Py_ssize_t mingapsize = 0;  /* threshold to ignore gaps */

	/* other core variables */
	Py_ssize_t idxlen = index_length(self);
	Py_ssize_t i;            /* used for various iteration */
	PyObject *result = NULL; /* the final return of the function */

	/* generic information about the delta chain being slice */
	Py_ssize_t num_revs = 0;    /* size of the full delta chain */
	Py_ssize_t *revs = NULL;    /* native array of revision in the chain */
	int64_t chainpayload = 0;   /* sum of all delta in the chain */
	int64_t deltachainspan = 0; /* distance from first byte to last byte */

	/* variable used for slicing the delta chain */
	int64_t readdata = 0; /* amount of data currently planned to be read */
	double density = 0;   /* ration of payload data compared to read ones */
	int64_t previous_end;
	struct Gap *gaps = NULL; /* array of notable gap in the chain */
	Py_ssize_t num_gaps =
	    0; /* total number of notable gap recorded so far */
	Py_ssize_t *selected_indices = NULL; /* indices of gap skipped over */
	Py_ssize_t num_selected = 0;         /* number of gaps skipped */
	PyObject *chunk = NULL;              /* individual slice */
	PyObject *allchunks = NULL;          /* all slices */
	Py_ssize_t previdx;

	/* parsing argument */
	if (!PyArg_ParseTuple(args, "O!dn", &PyList_Type, &list_revs,
	                      &targetdensity, &mingapsize)) {
		goto bail;
	}

	/* If the delta chain contains a single element, we do not need slicing
	 */
	num_revs = PyList_GET_SIZE(list_revs);
	if (num_revs <= 1) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* Turn the python list into a native integer array (for efficiency) */
	revs = (Py_ssize_t *)calloc(num_revs, sizeof(Py_ssize_t));
	if (revs == NULL) {
		PyErr_NoMemory();
		goto bail;
	}
	for (i = 0; i < num_revs; i++) {
		Py_ssize_t revnum = PyInt_AsLong(PyList_GET_ITEM(list_revs, i));
		if (revnum == -1 && PyErr_Occurred()) {
			goto bail;
		}
		if (revnum < nullrev || revnum >= idxlen) {
			PyErr_Format(PyExc_IndexError,
			             "index out of range: %zd", revnum);
			goto bail;
		}
		revs[i] = revnum;
	}

	/* Compute and check various property of the unsliced delta chain */
	deltachainspan = index_segment_span(self, revs[0], revs[num_revs - 1]);
	if (deltachainspan < 0) {
		goto bail;
	}

	/* a chain smaller than the gap threshold is never worth slicing */
	if (deltachainspan <= mingapsize) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	chainpayload = 0;
	for (i = 0; i < num_revs; i++) {
		int tmp = index_get_length(self, revs[i]);
		if (tmp < 0) {
			goto bail;
		}
		chainpayload += tmp;
	}

	readdata = deltachainspan;
	density = 1.0;

	if (0 < deltachainspan) {
		density = (double)chainpayload / (double)deltachainspan;
	}

	/* already dense enough: return the chain as a single slice */
	if (density >= targetdensity) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* if chain is too sparse, look for relevant gaps */
	gaps = (struct Gap *)calloc(num_revs, sizeof(struct Gap));
	if (gaps == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	previous_end = -1;
	for (i = 0; i < num_revs; i++) {
		int64_t revstart;
		int revsize;
		revstart = index_get_start(self, revs[i]);
		if (revstart < 0) {
			goto bail;
		};
		revsize = index_get_length(self, revs[i]);
		if (revsize < 0) {
			goto bail;
		};
		/* empty revisions do not create nor terminate gaps */
		if (revsize == 0) {
			continue;
		}
		if (previous_end >= 0) {
			int64_t gapsize = revstart - previous_end;
			if (gapsize > mingapsize) {
				gaps[num_gaps].size = gapsize;
				gaps[num_gaps].idx = i;
				num_gaps += 1;
			}
		}
		previous_end = revstart + revsize;
	}
	if (num_gaps == 0) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	qsort(gaps, num_gaps, sizeof(struct Gap), &gap_compare);

	/* Slice the largest gap first, they improve the density the most */
	selected_indices =
	    (Py_ssize_t *)malloc((num_gaps + 1) * sizeof(Py_ssize_t));
	if (selected_indices == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	for (i = num_gaps - 1; i >= 0; i--) {
		selected_indices[num_selected] = gaps[i].idx;
		readdata -= gaps[i].size;
		num_selected += 1;
		if (readdata <= 0) {
			density = 1.0;
		} else {
			density = (double)chainpayload / (double)readdata;
		}
		if (density >= targetdensity) {
			break;
		}
	}
	/* cut points must be back in ascending order to slice the list */
	qsort(selected_indices, num_selected, sizeof(Py_ssize_t),
	      &Py_ssize_t_compare);

	/* create the resulting slice */
	allchunks = PyList_New(0);
	if (allchunks == NULL) {
		goto bail;
	}
	previdx = 0;
	/* sentinel so the final slice reaches the end of the chain */
	selected_indices[num_selected] = num_revs;
	for (i = 0; i <= num_selected; i++) {
		Py_ssize_t idx = selected_indices[i];
		Py_ssize_t endidx = trim_endidx(self, revs, previdx, idx);
		if (endidx < 0) {
			goto bail;
		}
		if (previdx < endidx) {
			chunk = PyList_GetSlice(list_revs, previdx, endidx);
			if (chunk == NULL) {
				goto bail;
			}
			if (PyList_Append(allchunks, chunk) == -1) {
				goto bail;
			}
			Py_DECREF(chunk);
			chunk = NULL;
		}
		previdx = idx;
	}
	result = allchunks;
	goto done;

bail:
	Py_XDECREF(allchunks);
	Py_XDECREF(chunk);
done:
	free(revs);
	free(gaps);
	free(selected_indices);
	return result;
}
1554 1532
1555 1533 static inline int nt_level(const char *node, Py_ssize_t level)
1556 1534 {
1557 1535 int v = node[level >> 1];
1558 1536 if (!(level & 1))
1559 1537 v >>= 4;
1560 1538 return v & 0xf;
1561 1539 }
1562 1540
/*
 * Look up `node` (binary nibbles or, when `hex` is set, hex digits) in
 * the radix tree.
 *
 * Return values:
 *
 * -4: match is ambiguous (multiple candidates)
 * -2: not found
 * rest: valid rev
 */
static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
                   int hex)
{
	/* nibble accessor: hex digits for text prefixes, packed nibbles
	 * for binary hashes */
	int (*getnybble)(const char *, Py_ssize_t) = hexdigit ? hex ? hexdigit : nt_level : nt_level;
	int level, maxlevel, off;

	if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
		return -1;

	if (hex)
		maxlevel = nodelen > 40 ? 40 : (int)nodelen;
	else
		maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);

	for (level = off = 0; level < maxlevel; level++) {
		int k = getnybble(node, level);
		nodetreenode *n = &self->nodes[off];
		int v = n->children[k];

		if (v < 0) {
			/* leaf: decode the revision (-v - 2) and verify the
			 * remaining nibbles of the stored node match */
			const char *n;
			Py_ssize_t i;

			v = -(v + 2);
			n = index_node(self->index, v);
			if (n == NULL)
				return -2;
			for (i = level; i < maxlevel; i++)
				if (getnybble(node, i) != nt_level(n, i))
					return -2;
			return v;
		}
		if (v == 0)
			return -2;
		/* interior node: descend */
		off = v;
	}
	/* multiple matches against an ambiguous prefix */
	return -4;
}
1609 1587
/*
 * Allocate a fresh trie node, doubling the backing array when full.
 * Returns the new node's index, or -1 on error (exception set).
 *
 * NOTE: may realloc self->nodes, invalidating previously cached
 * nodetreenode pointers.
 */
static int nt_new(nodetree *self)
{
	if (self->length == self->capacity) {
		unsigned newcapacity;
		nodetreenode *newnodes;
		newcapacity = self->capacity * 2;
		if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
			PyErr_SetString(PyExc_MemoryError,
			                "overflow in nt_new");
			return -1;
		}
		newnodes =
		    realloc(self->nodes, newcapacity * sizeof(nodetreenode));
		if (newnodes == NULL) {
			PyErr_SetString(PyExc_MemoryError, "out of memory");
			return -1;
		}
		self->capacity = newcapacity;
		self->nodes = newnodes;
		/* zero the new tail so its child slots read as "empty" */
		memset(&self->nodes[self->length], 0,
		       sizeof(nodetreenode) * (self->capacity - self->length));
	}
	return self->length++;
}
1634 1612
/*
 * Insert a 20-byte binary `node` mapping to revision `rev` into the
 * trie. Returns 0 on success, -1 on error (exception set).
 */
static int nt_insert(nodetree *self, const char *node, int rev)
{
	int level = 0;
	int off = 0;

	while (level < 40) {
		int k = nt_level(node, level);
		nodetreenode *n;
		int v;

		n = &self->nodes[off];
		v = n->children[k];

		if (v == 0) {
			/* empty slot: store the leaf (rev encoded as -rev-2) */
			n->children[k] = -rev - 2;
			return 0;
		}
		if (v < 0) {
			/* slot holds another leaf: split it */
			const char *oldnode =
			    index_node_existing(self->index, -(v + 2));
			int noff;

			if (oldnode == NULL)
				return -1;
			if (!memcmp(oldnode, node, 20)) {
				/* same node: just overwrite the stored rev */
				n->children[k] = -rev - 2;
				return 0;
			}
			noff = nt_new(self);
			if (noff == -1)
				return -1;
			/* self->nodes may have been changed by realloc */
			self->nodes[off].children[k] = noff;
			off = noff;
			n = &self->nodes[off];
			/* push the displaced leaf one level down */
			n->children[nt_level(oldnode, ++level)] = v;
			if (level > self->depth)
				self->depth = level;
			self->splits += 1;
		} else {
			/* interior node: descend one nibble */
			level += 1;
			off = v;
		}
	}

	return -1;
}
1682 1660
1683 1661 static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
1684 1662 {
1685 1663 Py_ssize_t rev;
1686 1664 const char *node;
1687 1665 Py_ssize_t length;
1688 1666 if (!PyArg_ParseTuple(args, "n", &rev))
1689 1667 return NULL;
1690 1668 length = index_length(self->nt.index);
1691 1669 if (rev < 0 || rev >= length) {
1692 1670 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1693 1671 return NULL;
1694 1672 }
1695 1673 node = index_node_existing(self->nt.index, rev);
1696 1674 if (nt_insert(&self->nt, node, (int)rev) == -1)
1697 1675 return NULL;
1698 1676 Py_RETURN_NONE;
1699 1677 }
1700 1678
/*
 * Remove `node` from the tree. Returns 0 on success, -1 on error.
 */
static int nt_delete_node(nodetree *self, const char *node)
{
	/* rev==-2 happens to get encoded as 0, which is interpreted as not set
	 */
	return nt_insert(self, node, -2);
}
1707 1685
/*
 * Initialize a nodetree over `index`, sized for roughly `capacity`
 * revisions. Returns 0 on success, -1 on error (exception set).
 */
static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
{
	/* Initialize before overflow-checking to avoid nt_dealloc() crash. */
	self->nodes = NULL;

	self->index = index;
	/* The input capacity is in terms of revisions, while the field is in
	 * terms of nodetree nodes. */
	self->capacity = (capacity < 4 ? 4 : capacity / 2);
	self->depth = 0;
	self->splits = 0;
	if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
		PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
		return -1;
	}
	self->nodes = calloc(self->capacity, sizeof(nodetreenode));
	if (self->nodes == NULL) {
		PyErr_NoMemory();
		return -1;
	}
	/* node 0 is the root and is always allocated */
	self->length = 1;
	return 0;
}
1731 1709
1732 1710 static int ntobj_init(nodetreeObject *self, PyObject *args)
1733 1711 {
1734 1712 PyObject *index;
1735 1713 unsigned capacity;
1736 1714 if (!PyArg_ParseTuple(args, "O!I", &HgRevlogIndex_Type, &index,
1737 1715 &capacity))
1738 1716 return -1;
1739 1717 Py_INCREF(index);
1740 1718 return nt_init(&self->nt, (indexObject *)index, capacity);
1741 1719 }
1742 1720
/*
 * Resolve a (possibly partial) hex-encoded nodeid; same return values
 * as nt_find().
 */
static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
{
	return nt_find(self, node, nodelen, 1);
}
1747 1725
/*
 * Find the length of the shortest unique prefix of node.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: length of shortest prefix
 */
static int nt_shortest(nodetree *self, const char *node)
{
	int level, off;

	for (level = off = 0; level < 40; level++) {
		int k, v;
		nodetreenode *n = &self->nodes[off];
		k = nt_level(node, level);
		v = n->children[k];
		if (v < 0) {
			/* leaf reached: the prefix is unique at this depth */
			const char *n;
			v = -(v + 2);
			n = index_node_existing(self->index, v);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, 20) != 0)
				/*
				 * Found a unique prefix, but it wasn't for the
				 * requested node (i.e the requested node does
				 * not exist).
				 */
				return -2;
			return level + 1;
		}
		if (v == 0)
			return -2;
		/* interior node: need at least one more nibble */
		off = v;
	}
	/*
	 * The node was still not unique after 40 hex digits, so this won't
	 * happen. Also, if we get here, then there's a programming error in
	 * this file that made us insert a node longer than 40 hex digits.
	 */
	PyErr_SetString(PyExc_Exception, "broken node tree");
	return -3;
}
1793 1771
1794 1772 static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
1795 1773 {
1796 1774 PyObject *val;
1797 1775 char *node;
1798 1776 int length;
1799 1777
1800 1778 if (!PyArg_ParseTuple(args, "O", &val))
1801 1779 return NULL;
1802 1780 if (node_check(val, &node) == -1)
1803 1781 return NULL;
1804 1782
1805 1783 length = nt_shortest(&self->nt, node);
1806 1784 if (length == -3)
1807 1785 return NULL;
1808 1786 if (length == -2) {
1809 1787 raise_revlog_error();
1810 1788 return NULL;
1811 1789 }
1812 1790 return PyInt_FromLong(length);
1813 1791 }
1814 1792
/* Release the node array; pointer is NULLed so a repeat call is safe. */
static void nt_dealloc(nodetree *self)
{
	free(self->nodes);
	self->nodes = NULL;
}
1820 1798
/* tp_dealloc: drop the index reference taken in ntobj_init(). */
static void ntobj_dealloc(nodetreeObject *self)
{
	Py_XDECREF(self->nt.index);
	nt_dealloc(&self->nt);
	PyObject_Del(self);
}
1827 1805
/* Methods exposed on Python-level nodetree objects. */
static PyMethodDef ntobj_methods[] = {
    {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
     "insert an index entry"},
    {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
     "find length of shortest hex nodeid of a binary ID"},
    {NULL} /* Sentinel */
};
1835 1813
/* Type object for the nodetree wrapper exposed to Python. */
static PyTypeObject nodetreeType = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.nodetree",            /* tp_name */
    sizeof(nodetreeObject),        /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)ntobj_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    0,                             /* tp_as_sequence */
    0,                             /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "nodetree",                    /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    ntobj_methods,                 /* tp_methods */
    0,                             /* tp_members */
    0,                             /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)ntobj_init,          /* tp_init */
    0,                             /* tp_alloc */
};
1875 1853
/*
 * Lazily create the index's embedded nodetree, seeding it with the null
 * node. Returns 0 on success (including when already initialized), -1
 * on error (exception set).
 */
static int index_init_nt(indexObject *self)
{
	if (!self->ntinitialized) {
		if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
			nt_dealloc(&self->nt);
			return -1;
		}
		if (nt_insert(&self->nt, nullid, -1) == -1) {
			nt_dealloc(&self->nt);
			return -1;
		}
		self->ntinitialized = 1;
		/* revisions below ntrev have not been inserted yet */
		self->ntrev = (int)index_length(self);
		self->ntlookups = 1;
		self->ntmisses = 0;
	}
	return 0;
}
1894 1872
/*
 * Map a binary node (or node prefix) to a revision, consulting and
 * lazily filling the radix tree.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: valid rev
 */
static int index_find_node(indexObject *self, const char *node,
                           Py_ssize_t nodelen)
{
	int rev;

	if (index_init_nt(self) == -1)
		return -3;

	self->ntlookups++;
	rev = nt_find(&self->nt, node, nodelen, 0);
	if (rev >= -1)
		return rev;

	/*
	 * For the first handful of lookups, we scan the entire index,
	 * and cache only the matching nodes. This optimizes for cases
	 * like "hg tip", where only a few nodes are accessed.
	 *
	 * After that, we cache every node we visit, using a single
	 * scan amortized over multiple lookups. This gives the best
	 * bulk performance, e.g. for "hg log".
	 */
	if (self->ntmisses++ < 4) {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				if (nt_insert(&self->nt, n, rev) == -1)
					return -3;
				break;
			}
		}
	} else {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (nt_insert(&self->nt, n, rev) == -1) {
				/* remember how far the insertion got */
				self->ntrev = rev + 1;
				return -3;
			}
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				break;
			}
		}
		self->ntrev = rev;
	}

	/* rev is -1 when the scan exhausted the index without a match */
	if (rev >= 0)
		return rev;
	return -2;
}
1955 1933
1956 1934 static PyObject *index_getitem(indexObject *self, PyObject *value)
1957 1935 {
1958 1936 char *node;
1959 1937 int rev;
1960 1938
1961 1939 if (PyInt_Check(value)) {
1962 1940 long idx;
1963 1941 if (!pylong_to_long(value, &idx)) {
1964 1942 return NULL;
1965 1943 }
1966 1944 return index_get(self, idx);
1967 1945 }
1968 1946
1969 1947 if (node_check(value, &node) == -1)
1970 1948 return NULL;
1971 1949 rev = index_find_node(self, node, 20);
1972 1950 if (rev >= -1)
1973 1951 return PyInt_FromLong(rev);
1974 1952 if (rev == -2)
1975 1953 raise_revlog_error();
1976 1954 return NULL;
1977 1955 }
1978 1956
1979 1957 /*
1980 1958 * Fully populate the radix tree.
1981 1959 */
1982 1960 static int index_populate_nt(indexObject *self)
1983 1961 {
1984 1962 int rev;
1985 1963 if (self->ntrev > 0) {
1986 1964 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1987 1965 const char *n = index_node_existing(self, rev);
1988 1966 if (n == NULL)
1989 1967 return -1;
1990 1968 if (nt_insert(&self->nt, n, rev) == -1)
1991 1969 return -1;
1992 1970 }
1993 1971 self->ntrev = -1;
1994 1972 }
1995 1973 return 0;
1996 1974 }
1997 1975
/*
 * partialmatch(hexprefix) -> 20-byte node, or None when no revision
 * matches; raises RevlogError when the prefix is ambiguous.
 */
static PyObject *index_partialmatch(indexObject *self, PyObject *args)
{
	const char *fullnode;
	Py_ssize_t nodelen;
	char *node;
	int rev, i;

	if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
		return NULL;

	if (nodelen < 1) {
		PyErr_SetString(PyExc_ValueError, "key too short");
		return NULL;
	}

	if (nodelen > 40) {
		PyErr_SetString(PyExc_ValueError, "key too long");
		return NULL;
	}

	/* hexdigit() flags invalid characters via the Python error state */
	for (i = 0; i < nodelen; i++)
		hexdigit(node, i);
	if (PyErr_Occurred()) {
		/* input contains non-hex characters */
		PyErr_Clear();
		Py_RETURN_NONE;
	}

	/* partial matching needs the whole tree populated */
	if (index_init_nt(self) == -1)
		return NULL;
	if (index_populate_nt(self) == -1)
		return NULL;
	rev = nt_partialmatch(&self->nt, node, nodelen);

	switch (rev) {
	case -4:
		raise_revlog_error();
		return NULL;
	case -2:
		Py_RETURN_NONE;
	case -1:
		return PyBytes_FromStringAndSize(nullid, 20);
	}

	fullnode = index_node_existing(self, rev);
	if (fullnode == NULL) {
		return NULL;
	}
	return PyBytes_FromStringAndSize(fullnode, 20);
}
2048 2026
2049 2027 static PyObject *index_shortest(indexObject *self, PyObject *args)
2050 2028 {
2051 2029 PyObject *val;
2052 2030 char *node;
2053 2031 int length;
2054 2032
2055 2033 if (!PyArg_ParseTuple(args, "O", &val))
2056 2034 return NULL;
2057 2035 if (node_check(val, &node) == -1)
2058 2036 return NULL;
2059 2037
2060 2038 self->ntlookups++;
2061 2039 if (index_init_nt(self) == -1)
2062 2040 return NULL;
2063 2041 if (index_populate_nt(self) == -1)
2064 2042 return NULL;
2065 2043 length = nt_shortest(&self->nt, node);
2066 2044 if (length == -3)
2067 2045 return NULL;
2068 2046 if (length == -2) {
2069 2047 raise_revlog_error();
2070 2048 return NULL;
2071 2049 }
2072 2050 return PyInt_FromLong(length);
2073 2051 }
2074 2052
2075 2053 static PyObject *index_m_get(indexObject *self, PyObject *args)
2076 2054 {
2077 2055 PyObject *val;
2078 2056 char *node;
2079 2057 int rev;
2080 2058
2081 2059 if (!PyArg_ParseTuple(args, "O", &val))
2082 2060 return NULL;
2083 2061 if (node_check(val, &node) == -1)
2084 2062 return NULL;
2085 2063 rev = index_find_node(self, node, 20);
2086 2064 if (rev == -3)
2087 2065 return NULL;
2088 2066 if (rev == -2)
2089 2067 Py_RETURN_NONE;
2090 2068 return PyInt_FromLong(rev);
2091 2069 }
2092 2070
2093 2071 static int index_contains(indexObject *self, PyObject *value)
2094 2072 {
2095 2073 char *node;
2096 2074
2097 2075 if (PyInt_Check(value)) {
2098 2076 long rev;
2099 2077 if (!pylong_to_long(value, &rev)) {
2100 2078 return -1;
2101 2079 }
2102 2080 return rev >= -1 && rev < index_length(self);
2103 2081 }
2104 2082
2105 2083 if (node_check(value, &node) == -1)
2106 2084 return -1;
2107 2085
2108 2086 switch (index_find_node(self, node, 20)) {
2109 2087 case -3:
2110 2088 return -1;
2111 2089 case -2:
2112 2090 return 0;
2113 2091 default:
2114 2092 return 1;
2115 2093 }
2116 2094 }
2117 2095
2118 2096 static PyObject *index_m_has_node(indexObject *self, PyObject *args)
2119 2097 {
2120 2098 int ret = index_contains(self, args);
2121 2099 if (ret < 0)
2122 2100 return NULL;
2123 2101 return PyBool_FromLong((long)ret);
2124 2102 }
2125 2103
2126 2104 static PyObject *index_m_rev(indexObject *self, PyObject *val)
2127 2105 {
2128 2106 char *node;
2129 2107 int rev;
2130 2108
2131 2109 if (node_check(val, &node) == -1)
2132 2110 return NULL;
2133 2111 rev = index_find_node(self, node, 20);
2134 2112 if (rev >= -1)
2135 2113 return PyInt_FromLong(rev);
2136 2114 if (rev == -2)
2137 2115 raise_revlog_error();
2138 2116 return NULL;
2139 2117 }
2140 2118
2141 2119 typedef uint64_t bitmask;
2142 2120
2143 2121 /*
2144 2122 * Given a disjoint set of revs, return all candidates for the
2145 2123 * greatest common ancestor. In revset notation, this is the set
2146 2124 * "heads(::a and ::b and ...)"
2147 2125 */
2148 2126 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
2149 2127 int revcount)
2150 2128 {
2151 2129 const bitmask allseen = (1ull << revcount) - 1;
2152 2130 const bitmask poison = 1ull << revcount;
2153 2131 PyObject *gca = PyList_New(0);
2154 2132 int i, v, interesting;
2155 2133 int maxrev = -1;
2156 2134 bitmask sp;
2157 2135 bitmask *seen;
2158 2136
2159 2137 if (gca == NULL)
2160 2138 return PyErr_NoMemory();
2161 2139
2162 2140 for (i = 0; i < revcount; i++) {
2163 2141 if (revs[i] > maxrev)
2164 2142 maxrev = revs[i];
2165 2143 }
2166 2144
2167 2145 seen = calloc(sizeof(*seen), maxrev + 1);
2168 2146 if (seen == NULL) {
2169 2147 Py_DECREF(gca);
2170 2148 return PyErr_NoMemory();
2171 2149 }
2172 2150
2173 2151 for (i = 0; i < revcount; i++)
2174 2152 seen[revs[i]] = 1ull << i;
2175 2153
2176 2154 interesting = revcount;
2177 2155
2178 2156 for (v = maxrev; v >= 0 && interesting; v--) {
2179 2157 bitmask sv = seen[v];
2180 2158 int parents[2];
2181 2159
2182 2160 if (!sv)
2183 2161 continue;
2184 2162
2185 2163 if (sv < poison) {
2186 2164 interesting -= 1;
2187 2165 if (sv == allseen) {
2188 2166 PyObject *obj = PyInt_FromLong(v);
2189 2167 if (obj == NULL)
2190 2168 goto bail;
2191 2169 if (PyList_Append(gca, obj) == -1) {
2192 2170 Py_DECREF(obj);
2193 2171 goto bail;
2194 2172 }
2195 2173 sv |= poison;
2196 2174 for (i = 0; i < revcount; i++) {
2197 2175 if (revs[i] == v)
2198 2176 goto done;
2199 2177 }
2200 2178 }
2201 2179 }
2202 2180 if (index_get_parents(self, v, parents, maxrev) < 0)
2203 2181 goto bail;
2204 2182
2205 2183 for (i = 0; i < 2; i++) {
2206 2184 int p = parents[i];
2207 2185 if (p == -1)
2208 2186 continue;
2209 2187 sp = seen[p];
2210 2188 if (sv < poison) {
2211 2189 if (sp == 0) {
2212 2190 seen[p] = sv;
2213 2191 interesting++;
2214 2192 } else if (sp != sv)
2215 2193 seen[p] |= sv;
2216 2194 } else {
2217 2195 if (sp && sp < poison)
2218 2196 interesting--;
2219 2197 seen[p] = sv;
2220 2198 }
2221 2199 }
2222 2200 }
2223 2201
2224 2202 done:
2225 2203 free(seen);
2226 2204 return gca;
2227 2205 bail:
2228 2206 free(seen);
2229 2207 Py_XDECREF(gca);
2230 2208 return NULL;
2231 2209 }
2232 2210
2233 2211 /*
2234 2212 * Given a disjoint set of revs, return the subset with the longest
2235 2213 * path to the root.
2236 2214 */
2237 2215 static PyObject *find_deepest(indexObject *self, PyObject *revs)
2238 2216 {
2239 2217 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
2240 2218 static const Py_ssize_t capacity = 24;
2241 2219 int *depth, *interesting = NULL;
2242 2220 int i, j, v, ninteresting;
2243 2221 PyObject *dict = NULL, *keys = NULL;
2244 2222 long *seen = NULL;
2245 2223 int maxrev = -1;
2246 2224 long final;
2247 2225
2248 2226 if (revcount > capacity) {
2249 2227 PyErr_Format(PyExc_OverflowError,
2250 2228 "bitset size (%ld) > capacity (%ld)",
2251 2229 (long)revcount, (long)capacity);
2252 2230 return NULL;
2253 2231 }
2254 2232
2255 2233 for (i = 0; i < revcount; i++) {
2256 2234 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2257 2235 if (n > maxrev)
2258 2236 maxrev = n;
2259 2237 }
2260 2238
2261 2239 depth = calloc(sizeof(*depth), maxrev + 1);
2262 2240 if (depth == NULL)
2263 2241 return PyErr_NoMemory();
2264 2242
2265 2243 seen = calloc(sizeof(*seen), maxrev + 1);
2266 2244 if (seen == NULL) {
2267 2245 PyErr_NoMemory();
2268 2246 goto bail;
2269 2247 }
2270 2248
2271 2249 interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
2272 2250 if (interesting == NULL) {
2273 2251 PyErr_NoMemory();
2274 2252 goto bail;
2275 2253 }
2276 2254
2277 2255 if (PyList_Sort(revs) == -1)
2278 2256 goto bail;
2279 2257
2280 2258 for (i = 0; i < revcount; i++) {
2281 2259 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2282 2260 long b = 1l << i;
2283 2261 depth[n] = 1;
2284 2262 seen[n] = b;
2285 2263 interesting[b] = 1;
2286 2264 }
2287 2265
2288 2266 /* invariant: ninteresting is the number of non-zero entries in
2289 2267 * interesting. */
2290 2268 ninteresting = (int)revcount;
2291 2269
2292 2270 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
2293 2271 int dv = depth[v];
2294 2272 int parents[2];
2295 2273 long sv;
2296 2274
2297 2275 if (dv == 0)
2298 2276 continue;
2299 2277
2300 2278 sv = seen[v];
2301 2279 if (index_get_parents(self, v, parents, maxrev) < 0)
2302 2280 goto bail;
2303 2281
2304 2282 for (i = 0; i < 2; i++) {
2305 2283 int p = parents[i];
2306 2284 long sp;
2307 2285 int dp;
2308 2286
2309 2287 if (p == -1)
2310 2288 continue;
2311 2289
2312 2290 dp = depth[p];
2313 2291 sp = seen[p];
2314 2292 if (dp <= dv) {
2315 2293 depth[p] = dv + 1;
2316 2294 if (sp != sv) {
2317 2295 interesting[sv] += 1;
2318 2296 seen[p] = sv;
2319 2297 if (sp) {
2320 2298 interesting[sp] -= 1;
2321 2299 if (interesting[sp] == 0)
2322 2300 ninteresting -= 1;
2323 2301 }
2324 2302 }
2325 2303 } else if (dv == dp - 1) {
2326 2304 long nsp = sp | sv;
2327 2305 if (nsp == sp)
2328 2306 continue;
2329 2307 seen[p] = nsp;
2330 2308 interesting[sp] -= 1;
2331 2309 if (interesting[sp] == 0)
2332 2310 ninteresting -= 1;
2333 2311 if (interesting[nsp] == 0)
2334 2312 ninteresting += 1;
2335 2313 interesting[nsp] += 1;
2336 2314 }
2337 2315 }
2338 2316 interesting[sv] -= 1;
2339 2317 if (interesting[sv] == 0)
2340 2318 ninteresting -= 1;
2341 2319 }
2342 2320
2343 2321 final = 0;
2344 2322 j = ninteresting;
2345 2323 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
2346 2324 if (interesting[i] == 0)
2347 2325 continue;
2348 2326 final |= i;
2349 2327 j -= 1;
2350 2328 }
2351 2329 if (final == 0) {
2352 2330 keys = PyList_New(0);
2353 2331 goto bail;
2354 2332 }
2355 2333
2356 2334 dict = PyDict_New();
2357 2335 if (dict == NULL)
2358 2336 goto bail;
2359 2337
2360 2338 for (i = 0; i < revcount; i++) {
2361 2339 PyObject *key;
2362 2340
2363 2341 if ((final & (1 << i)) == 0)
2364 2342 continue;
2365 2343
2366 2344 key = PyList_GET_ITEM(revs, i);
2367 2345 Py_INCREF(key);
2368 2346 Py_INCREF(Py_None);
2369 2347 if (PyDict_SetItem(dict, key, Py_None) == -1) {
2370 2348 Py_DECREF(key);
2371 2349 Py_DECREF(Py_None);
2372 2350 goto bail;
2373 2351 }
2374 2352 }
2375 2353
2376 2354 keys = PyDict_Keys(dict);
2377 2355
2378 2356 bail:
2379 2357 free(depth);
2380 2358 free(seen);
2381 2359 free(interesting);
2382 2360 Py_XDECREF(dict);
2383 2361
2384 2362 return keys;
2385 2363 }
2386 2364
2387 2365 /*
2388 2366 * Given a (possibly overlapping) set of revs, return all the
2389 2367 * common ancestors heads: heads(::args[0] and ::a[1] and ...)
2390 2368 */
2391 2369 static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
2392 2370 {
2393 2371 PyObject *ret = NULL;
2394 2372 Py_ssize_t argcount, i, len;
2395 2373 bitmask repeat = 0;
2396 2374 int revcount = 0;
2397 2375 int *revs;
2398 2376
2399 2377 argcount = PySequence_Length(args);
2400 2378 revs = PyMem_Malloc(argcount * sizeof(*revs));
2401 2379 if (argcount > 0 && revs == NULL)
2402 2380 return PyErr_NoMemory();
2403 2381 len = index_length(self);
2404 2382
2405 2383 for (i = 0; i < argcount; i++) {
2406 2384 static const int capacity = 24;
2407 2385 PyObject *obj = PySequence_GetItem(args, i);
2408 2386 bitmask x;
2409 2387 long val;
2410 2388
2411 2389 if (!PyInt_Check(obj)) {
2412 2390 PyErr_SetString(PyExc_TypeError,
2413 2391 "arguments must all be ints");
2414 2392 Py_DECREF(obj);
2415 2393 goto bail;
2416 2394 }
2417 2395 val = PyInt_AsLong(obj);
2418 2396 Py_DECREF(obj);
2419 2397 if (val == -1) {
2420 2398 ret = PyList_New(0);
2421 2399 goto done;
2422 2400 }
2423 2401 if (val < 0 || val >= len) {
2424 2402 PyErr_SetString(PyExc_IndexError, "index out of range");
2425 2403 goto bail;
2426 2404 }
2427 2405 /* this cheesy bloom filter lets us avoid some more
2428 2406 * expensive duplicate checks in the common set-is-disjoint
2429 2407 * case */
2430 2408 x = 1ull << (val & 0x3f);
2431 2409 if (repeat & x) {
2432 2410 int k;
2433 2411 for (k = 0; k < revcount; k++) {
2434 2412 if (val == revs[k])
2435 2413 goto duplicate;
2436 2414 }
2437 2415 } else
2438 2416 repeat |= x;
2439 2417 if (revcount >= capacity) {
2440 2418 PyErr_Format(PyExc_OverflowError,
2441 2419 "bitset size (%d) > capacity (%d)",
2442 2420 revcount, capacity);
2443 2421 goto bail;
2444 2422 }
2445 2423 revs[revcount++] = (int)val;
2446 2424 duplicate:;
2447 2425 }
2448 2426
2449 2427 if (revcount == 0) {
2450 2428 ret = PyList_New(0);
2451 2429 goto done;
2452 2430 }
2453 2431 if (revcount == 1) {
2454 2432 PyObject *obj;
2455 2433 ret = PyList_New(1);
2456 2434 if (ret == NULL)
2457 2435 goto bail;
2458 2436 obj = PyInt_FromLong(revs[0]);
2459 2437 if (obj == NULL)
2460 2438 goto bail;
2461 2439 PyList_SET_ITEM(ret, 0, obj);
2462 2440 goto done;
2463 2441 }
2464 2442
2465 2443 ret = find_gca_candidates(self, revs, revcount);
2466 2444 if (ret == NULL)
2467 2445 goto bail;
2468 2446
2469 2447 done:
2470 2448 PyMem_Free(revs);
2471 2449 return ret;
2472 2450
2473 2451 bail:
2474 2452 PyMem_Free(revs);
2475 2453 Py_XDECREF(ret);
2476 2454 return NULL;
2477 2455 }
2478 2456
2479 2457 /*
2480 2458 * Given a (possibly overlapping) set of revs, return the greatest
2481 2459 * common ancestors: those with the longest path to the root.
2482 2460 */
2483 2461 static PyObject *index_ancestors(indexObject *self, PyObject *args)
2484 2462 {
2485 2463 PyObject *ret;
2486 2464 PyObject *gca = index_commonancestorsheads(self, args);
2487 2465 if (gca == NULL)
2488 2466 return NULL;
2489 2467
2490 2468 if (PyList_GET_SIZE(gca) <= 1) {
2491 2469 return gca;
2492 2470 }
2493 2471
2494 2472 ret = find_deepest(self, gca);
2495 2473 Py_DECREF(gca);
2496 2474 return ret;
2497 2475 }
2498 2476
2499 2477 /*
2500 2478 * Invalidate any trie entries introduced by added revs.
2501 2479 */
2502 2480 static void index_invalidate_added(indexObject *self, Py_ssize_t start)
2503 2481 {
2504 2482 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
2505 2483
2506 2484 for (i = start; i < len; i++) {
2507 2485 PyObject *tuple = PyList_GET_ITEM(self->added, i);
2508 2486 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
2509 2487
2510 2488 nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
2511 2489 }
2512 2490
2513 2491 if (start == 0)
2514 2492 Py_CLEAR(self->added);
2515 2493 }
2516 2494
2517 2495 /*
2518 2496 * Delete a numeric range of revs, which must be at the end of the
2519 2497 * range.
2520 2498 */
2521 2499 static int index_slice_del(indexObject *self, PyObject *item)
2522 2500 {
2523 2501 Py_ssize_t start, stop, step, slicelength;
2524 2502 Py_ssize_t length = index_length(self) + 1;
2525 2503 int ret = 0;
2526 2504
2527 2505 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
2528 2506 #ifdef IS_PY3K
2529 2507 if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
2530 2508 &slicelength) < 0)
2531 2509 #else
2532 2510 if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
2533 2511 &step, &slicelength) < 0)
2534 2512 #endif
2535 2513 return -1;
2536 2514
2537 2515 if (slicelength <= 0)
2538 2516 return 0;
2539 2517
2540 2518 if ((step < 0 && start < stop) || (step > 0 && start > stop))
2541 2519 stop = start;
2542 2520
2543 2521 if (step < 0) {
2544 2522 stop = start + 1;
2545 2523 start = stop + step * (slicelength - 1) - 1;
2546 2524 step = -step;
2547 2525 }
2548 2526
2549 2527 if (step != 1) {
2550 2528 PyErr_SetString(PyExc_ValueError,
2551 2529 "revlog index delete requires step size of 1");
2552 2530 return -1;
2553 2531 }
2554 2532
2555 2533 if (stop != length - 1) {
2556 2534 PyErr_SetString(PyExc_IndexError,
2557 2535 "revlog index deletion indices are invalid");
2558 2536 return -1;
2559 2537 }
2560 2538
2561 2539 if (start < self->length) {
2562 2540 if (self->ntinitialized) {
2563 2541 Py_ssize_t i;
2564 2542
2565 2543 for (i = start; i < self->length; i++) {
2566 2544 const char *node = index_node_existing(self, i);
2567 2545 if (node == NULL)
2568 2546 return -1;
2569 2547
2570 2548 nt_delete_node(&self->nt, node);
2571 2549 }
2572 2550 if (self->added)
2573 2551 index_invalidate_added(self, 0);
2574 2552 if (self->ntrev > start)
2575 2553 self->ntrev = (int)start;
2576 2554 } else if (self->added) {
2577 2555 Py_CLEAR(self->added);
2578 2556 }
2579 2557
2580 2558 self->length = start;
2581 if (start < self->raw_length) {
2582 if (self->cache) {
2583 Py_ssize_t i;
2584 for (i = start; i < self->raw_length; i++)
2585 Py_CLEAR(self->cache[i]);
2586 }
2559 if (start < self->raw_length)
2587 2560 self->raw_length = start;
2588 }
2589 2561 goto done;
2590 2562 }
2591 2563
2592 2564 if (self->ntinitialized) {
2593 2565 index_invalidate_added(self, start - self->length);
2594 2566 if (self->ntrev > start)
2595 2567 self->ntrev = (int)start;
2596 2568 }
2597 2569 if (self->added)
2598 2570 ret = PyList_SetSlice(self->added, start - self->length,
2599 2571 PyList_GET_SIZE(self->added), NULL);
2600 2572 done:
2601 2573 Py_CLEAR(self->headrevs);
2602 2574 return ret;
2603 2575 }
2604 2576
2605 2577 /*
2606 2578 * Supported ops:
2607 2579 *
2608 2580 * slice deletion
2609 2581 * string assignment (extend node->rev mapping)
2610 2582 * string deletion (shrink node->rev mapping)
2611 2583 */
2612 2584 static int index_assign_subscript(indexObject *self, PyObject *item,
2613 2585 PyObject *value)
2614 2586 {
2615 2587 char *node;
2616 2588 long rev;
2617 2589
2618 2590 if (PySlice_Check(item) && value == NULL)
2619 2591 return index_slice_del(self, item);
2620 2592
2621 2593 if (node_check(item, &node) == -1)
2622 2594 return -1;
2623 2595
2624 2596 if (value == NULL)
2625 2597 return self->ntinitialized ? nt_delete_node(&self->nt, node)
2626 2598 : 0;
2627 2599 rev = PyInt_AsLong(value);
2628 2600 if (rev > INT_MAX || rev < 0) {
2629 2601 if (!PyErr_Occurred())
2630 2602 PyErr_SetString(PyExc_ValueError, "rev out of range");
2631 2603 return -1;
2632 2604 }
2633 2605
2634 2606 if (index_init_nt(self) == -1)
2635 2607 return -1;
2636 2608 return nt_insert(&self->nt, node, (int)rev);
2637 2609 }
2638 2610
2639 2611 /*
2640 2612 * Find all RevlogNG entries in an index that has inline data. Update
2641 2613 * the optional "offsets" table with those entries.
2642 2614 */
2643 2615 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
2644 2616 {
2645 2617 const char *data = (const char *)self->buf.buf;
2646 2618 Py_ssize_t pos = 0;
2647 2619 Py_ssize_t end = self->buf.len;
2648 2620 long incr = v1_hdrsize;
2649 2621 Py_ssize_t len = 0;
2650 2622
2651 2623 while (pos + v1_hdrsize <= end && pos >= 0) {
2652 2624 uint32_t comp_len;
2653 2625 /* 3rd element of header is length of compressed inline data */
2654 2626 comp_len = getbe32(data + pos + 8);
2655 2627 incr = v1_hdrsize + comp_len;
2656 2628 if (offsets)
2657 2629 offsets[len] = data + pos;
2658 2630 len++;
2659 2631 pos += incr;
2660 2632 }
2661 2633
2662 2634 if (pos != end) {
2663 2635 if (!PyErr_Occurred())
2664 2636 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2665 2637 return -1;
2666 2638 }
2667 2639
2668 2640 return len;
2669 2641 }
2670 2642
2671 2643 static int index_init(indexObject *self, PyObject *args)
2672 2644 {
2673 2645 PyObject *data_obj, *inlined_obj;
2674 2646 Py_ssize_t size;
2675 2647
2676 2648 /* Initialize before argument-checking to avoid index_dealloc() crash.
2677 2649 */
2678 2650 self->raw_length = 0;
2679 2651 self->added = NULL;
2680 self->cache = NULL;
2681 2652 self->data = NULL;
2682 2653 memset(&self->buf, 0, sizeof(self->buf));
2683 2654 self->headrevs = NULL;
2684 2655 self->filteredrevs = Py_None;
2685 2656 Py_INCREF(Py_None);
2686 2657 self->ntinitialized = 0;
2687 2658 self->offsets = NULL;
2688 2659
2689 2660 if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
2690 2661 return -1;
2691 2662 if (!PyObject_CheckBuffer(data_obj)) {
2692 2663 PyErr_SetString(PyExc_TypeError,
2693 2664 "data does not support buffer interface");
2694 2665 return -1;
2695 2666 }
2696 2667
2697 2668 if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
2698 2669 return -1;
2699 2670 size = self->buf.len;
2700 2671
2701 2672 self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
2702 2673 self->data = data_obj;
2703 2674
2704 2675 self->ntlookups = self->ntmisses = 0;
2705 2676 self->ntrev = -1;
2706 2677 Py_INCREF(self->data);
2707 2678
2708 2679 if (self->inlined) {
2709 2680 Py_ssize_t len = inline_scan(self, NULL);
2710 2681 if (len == -1)
2711 2682 goto bail;
2712 2683 self->raw_length = len;
2713 2684 self->length = len;
2714 2685 } else {
2715 2686 if (size % v1_hdrsize) {
2716 2687 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2717 2688 goto bail;
2718 2689 }
2719 2690 self->raw_length = size / v1_hdrsize;
2720 2691 self->length = self->raw_length;
2721 2692 }
2722 2693
2723 2694 return 0;
2724 2695 bail:
2725 2696 return -1;
2726 2697 }
2727 2698
2728 2699 static PyObject *index_nodemap(indexObject *self)
2729 2700 {
2730 2701 Py_INCREF(self);
2731 2702 return (PyObject *)self;
2732 2703 }
2733 2704
2734 2705 static void _index_clearcaches(indexObject *self)
2735 2706 {
2736 if (self->cache) {
2737 Py_ssize_t i;
2738
2739 for (i = 0; i < self->raw_length; i++)
2740 Py_CLEAR(self->cache[i]);
2741 free(self->cache);
2742 self->cache = NULL;
2743 }
2744 2707 if (self->offsets) {
2745 2708 PyMem_Free((void *)self->offsets);
2746 2709 self->offsets = NULL;
2747 2710 }
2748 2711 if (self->ntinitialized) {
2749 2712 nt_dealloc(&self->nt);
2750 2713 }
2751 2714 self->ntinitialized = 0;
2752 2715 Py_CLEAR(self->headrevs);
2753 2716 }
2754 2717
2755 2718 static PyObject *index_clearcaches(indexObject *self)
2756 2719 {
2757 2720 _index_clearcaches(self);
2758 2721 self->ntrev = -1;
2759 2722 self->ntlookups = self->ntmisses = 0;
2760 2723 Py_RETURN_NONE;
2761 2724 }
2762 2725
2763 2726 static void index_dealloc(indexObject *self)
2764 2727 {
2765 2728 _index_clearcaches(self);
2766 2729 Py_XDECREF(self->filteredrevs);
2767 2730 if (self->buf.buf) {
2768 2731 PyBuffer_Release(&self->buf);
2769 2732 memset(&self->buf, 0, sizeof(self->buf));
2770 2733 }
2771 2734 Py_XDECREF(self->data);
2772 2735 Py_XDECREF(self->added);
2773 2736 PyObject_Del(self);
2774 2737 }
2775 2738
2776 2739 static PySequenceMethods index_sequence_methods = {
2777 2740 (lenfunc)index_length, /* sq_length */
2778 2741 0, /* sq_concat */
2779 2742 0, /* sq_repeat */
2780 2743 (ssizeargfunc)index_get, /* sq_item */
2781 2744 0, /* sq_slice */
2782 2745 0, /* sq_ass_item */
2783 2746 0, /* sq_ass_slice */
2784 2747 (objobjproc)index_contains, /* sq_contains */
2785 2748 };
2786 2749
2787 2750 static PyMappingMethods index_mapping_methods = {
2788 2751 (lenfunc)index_length, /* mp_length */
2789 2752 (binaryfunc)index_getitem, /* mp_subscript */
2790 2753 (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
2791 2754 };
2792 2755
2793 2756 static PyMethodDef index_methods[] = {
2794 2757 {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
2795 2758 "return the gca set of the given revs"},
2796 2759 {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
2797 2760 METH_VARARGS,
2798 2761 "return the heads of the common ancestors of the given revs"},
2799 2762 {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
2800 2763 "clear the index caches"},
2801 2764 {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
2802 2765 {"get_rev", (PyCFunction)index_m_get, METH_VARARGS,
2803 2766 "return `rev` associated with a node or None"},
2804 2767 {"has_node", (PyCFunction)index_m_has_node, METH_O,
2805 2768 "return True if the node exist in the index"},
2806 2769 {"rev", (PyCFunction)index_m_rev, METH_O,
2807 2770 "return `rev` associated with a node or raise RevlogError"},
2808 2771 {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
2809 2772 "compute phases"},
2810 2773 {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
2811 2774 "reachableroots"},
2812 2775 {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
2813 2776 "get head revisions"}, /* Can do filtering since 3.2 */
2814 2777 {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
2815 2778 "get filtered head revisions"}, /* Can always do filtering */
2816 2779 {"issnapshot", (PyCFunction)index_issnapshot, METH_O,
2817 2780 "True if the object is a snapshot"},
2818 2781 {"findsnapshots", (PyCFunction)index_findsnapshots, METH_VARARGS,
2819 2782 "Gather snapshot data in a cache dict"},
2820 2783 {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
2821 2784 "determine revisions with deltas to reconstruct fulltext"},
2822 2785 {"slicechunktodensity", (PyCFunction)index_slicechunktodensity,
2823 2786 METH_VARARGS, "determine revisions with deltas to reconstruct fulltext"},
2824 2787 {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
2825 2788 {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
2826 2789 "match a potentially ambiguous node ID"},
2827 2790 {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
2828 2791 "find length of shortest hex nodeid of a binary ID"},
2829 2792 {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
2830 2793 {NULL} /* Sentinel */
2831 2794 };
2832 2795
2833 2796 static PyGetSetDef index_getset[] = {
2834 2797 {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
2835 2798 {NULL} /* Sentinel */
2836 2799 };
2837 2800
2838 2801 PyTypeObject HgRevlogIndex_Type = {
2839 2802 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2840 2803 "parsers.index", /* tp_name */
2841 2804 sizeof(indexObject), /* tp_basicsize */
2842 2805 0, /* tp_itemsize */
2843 2806 (destructor)index_dealloc, /* tp_dealloc */
2844 2807 0, /* tp_print */
2845 2808 0, /* tp_getattr */
2846 2809 0, /* tp_setattr */
2847 2810 0, /* tp_compare */
2848 2811 0, /* tp_repr */
2849 2812 0, /* tp_as_number */
2850 2813 &index_sequence_methods, /* tp_as_sequence */
2851 2814 &index_mapping_methods, /* tp_as_mapping */
2852 2815 0, /* tp_hash */
2853 2816 0, /* tp_call */
2854 2817 0, /* tp_str */
2855 2818 0, /* tp_getattro */
2856 2819 0, /* tp_setattro */
2857 2820 0, /* tp_as_buffer */
2858 2821 Py_TPFLAGS_DEFAULT, /* tp_flags */
2859 2822 "revlog index", /* tp_doc */
2860 2823 0, /* tp_traverse */
2861 2824 0, /* tp_clear */
2862 2825 0, /* tp_richcompare */
2863 2826 0, /* tp_weaklistoffset */
2864 2827 0, /* tp_iter */
2865 2828 0, /* tp_iternext */
2866 2829 index_methods, /* tp_methods */
2867 2830 0, /* tp_members */
2868 2831 index_getset, /* tp_getset */
2869 2832 0, /* tp_base */
2870 2833 0, /* tp_dict */
2871 2834 0, /* tp_descr_get */
2872 2835 0, /* tp_descr_set */
2873 2836 0, /* tp_dictoffset */
2874 2837 (initproc)index_init, /* tp_init */
2875 2838 0, /* tp_alloc */
2876 2839 };
2877 2840
2878 2841 /*
2879 2842 * returns a tuple of the form (index, index, cache) with elements as
2880 2843 * follows:
2881 2844 *
2882 2845 * index: an index object that lazily parses RevlogNG records
2883 2846 * cache: if data is inlined, a tuple (0, index_file_content), else None
2884 2847 * index_file_content could be a string, or a buffer
2885 2848 *
2886 2849 * added complications are for backwards compatibility
2887 2850 */
2888 2851 PyObject *parse_index2(PyObject *self, PyObject *args)
2889 2852 {
2890 2853 PyObject *cache = NULL;
2891 2854 indexObject *idx;
2892 2855 int ret;
2893 2856
2894 2857 idx = PyObject_New(indexObject, &HgRevlogIndex_Type);
2895 2858 if (idx == NULL)
2896 2859 goto bail;
2897 2860
2898 2861 ret = index_init(idx, args);
2899 2862 if (ret == -1)
2900 2863 goto bail;
2901 2864
2902 2865 if (idx->inlined) {
2903 2866 cache = Py_BuildValue("iO", 0, idx->data);
2904 2867 if (cache == NULL)
2905 2868 goto bail;
2906 2869 } else {
2907 2870 cache = Py_None;
2908 2871 Py_INCREF(cache);
2909 2872 }
2910 2873
2911 2874 return Py_BuildValue("NN", idx, cache);
2912 2875
2913 2876 bail:
2914 2877 Py_XDECREF(idx);
2915 2878 Py_XDECREF(cache);
2916 2879 return NULL;
2917 2880 }
2918 2881
2919 2882 static Revlog_CAPI CAPI = {
2920 2883 /* increment the abi_version field upon each change in the Revlog_CAPI
2921 2884 struct or in the ABI of the listed functions */
2922 2885 2,
2923 2886 index_length,
2924 2887 index_node,
2925 2888 HgRevlogIndex_GetParents,
2926 2889 };
2927 2890
2928 2891 void revlog_module_init(PyObject *mod)
2929 2892 {
2930 2893 PyObject *caps = NULL;
2931 2894 HgRevlogIndex_Type.tp_new = PyType_GenericNew;
2932 2895 if (PyType_Ready(&HgRevlogIndex_Type) < 0)
2933 2896 return;
2934 2897 Py_INCREF(&HgRevlogIndex_Type);
2935 2898 PyModule_AddObject(mod, "index", (PyObject *)&HgRevlogIndex_Type);
2936 2899
2937 2900 nodetreeType.tp_new = PyType_GenericNew;
2938 2901 if (PyType_Ready(&nodetreeType) < 0)
2939 2902 return;
2940 2903 Py_INCREF(&nodetreeType);
2941 2904 PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);
2942 2905
2943 2906 if (!nullentry) {
2944 2907 nullentry =
2945 2908 Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0, -1,
2946 2909 -1, -1, -1, nullid, (Py_ssize_t)20);
2947 2910 }
2948 2911 if (nullentry)
2949 2912 PyObject_GC_UnTrack(nullentry);
2950 2913
2951 2914 caps = PyCapsule_New(&CAPI, "mercurial.cext.parsers.revlog_CAPI", NULL);
2952 2915 if (caps != NULL)
2953 2916 PyModule_AddObject(mod, "revlog_CAPI", caps);
2954 2917 }
General Comments 0
You need to be logged in to leave comments. Login now