revlog: address review feedback for deltachain C implementation...
Gregory Szorc
r33171:f4f52bb3 default
@@ -1,2077 +1,2085
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #include <Python.h>
11 #include <assert.h>
11 12 #include <ctype.h>
12 13 #include <stddef.h>
13 14 #include <string.h>
14 15
15 16 #include "util.h"
16 17 #include "bitmanipulation.h"
17 18
18 19 #ifdef IS_PY3K
19 20 /* The mapping of Python types is meant to be temporary to get Python
20 21 * 3 to compile. We should remove this once Python 3 support is fully
21 22 * supported and proper types are used in the extensions themselves. */
22 23 #define PyInt_Check PyLong_Check
23 24 #define PyInt_FromLong PyLong_FromLong
24 25 #define PyInt_FromSsize_t PyLong_FromSsize_t
25 26 #define PyInt_AS_LONG PyLong_AS_LONG
26 27 #define PyInt_AsLong PyLong_AsLong
27 28 #endif
28 29
29 30 /*
30 31 * A base-16 trie for fast node->rev mapping.
31 32 *
32 33 * Positive value is index of the next node in the trie
33 34 * Negative value is a leaf: -(rev + 1)
34 35 * Zero is empty
35 36 */
36 37 typedef struct {
37 38 int children[16];
38 39 } nodetree;
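/* Illustration (added annotation, not part of the original source): with the
 * encoding above, children[k] == 5 points at the trie node nt[5],
 * children[k] == -13 is a leaf for rev 12 (-(12 + 1)), and children[k] == 0
 * marks an empty slot. */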
39 40
40 41 /*
41 42 * This class has two behaviors.
42 43 *
43 44 * When used in a list-like way (with integer keys), we decode an
44 45 * entry in a RevlogNG index file on demand. Our last entry is a
45 46 * sentinel, always a nullid. We have limited support for
46 47 * integer-keyed insert and delete, only at elements right before the
47 48 * sentinel.
48 49 *
49 50 * With string keys, we lazily perform a reverse mapping from node to
50 51 * rev, using a base-16 trie.
51 52 */
52 53 typedef struct {
53 54 PyObject_HEAD
54 55 /* Type-specific fields go here. */
55 56 PyObject *data; /* raw bytes of index */
56 57 Py_buffer buf; /* buffer of data */
57 58 PyObject **cache; /* cached tuples */
58 59 const char **offsets; /* populated on demand */
59 60 Py_ssize_t raw_length; /* original number of elements */
60 61 Py_ssize_t length; /* current number of elements */
61 62 PyObject *added; /* populated on demand */
62 63 PyObject *headrevs; /* cache, invalidated on changes */
63 64 PyObject *filteredrevs;/* filtered revs set */
64 65 nodetree *nt; /* base-16 trie */
65 66 unsigned ntlength; /* # nodes in use */
66 67 unsigned ntcapacity; /* # nodes allocated */
67 68 int ntdepth; /* maximum depth of tree */
68 69 int ntsplits; /* # splits performed */
69 70 int ntrev; /* last rev scanned */
70 71 int ntlookups; /* # lookups */
71 72 int ntmisses; /* # lookups that miss the cache */
72 73 int inlined;
73 74 } indexObject;
74 75
75 76 static Py_ssize_t index_length(const indexObject *self)
76 77 {
77 78 if (self->added == NULL)
78 79 return self->length;
79 80 return self->length + PyList_GET_SIZE(self->added);
80 81 }
81 82
82 83 static PyObject *nullentry;
83 84 static const char nullid[20];
84 85
85 86 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
86 87
87 88 #if LONG_MAX == 0x7fffffffL
88 89 static char *tuple_format = "Kiiiiiis#";
89 90 #else
90 91 static char *tuple_format = "kiiiiiis#";
91 92 #endif
92 93
93 94 /* A RevlogNG v1 index entry is 64 bytes long. */
94 95 static const long v1_hdrsize = 64;
95 96
96 97 /*
97 98 * Return a pointer to the beginning of a RevlogNG record.
98 99 */
99 100 static const char *index_deref(indexObject *self, Py_ssize_t pos)
100 101 {
101 102 if (self->inlined && pos > 0) {
102 103 if (self->offsets == NULL) {
103 104 self->offsets = PyMem_Malloc(self->raw_length *
104 105 sizeof(*self->offsets));
105 106 if (self->offsets == NULL)
106 107 return (const char *)PyErr_NoMemory();
107 108 inline_scan(self, self->offsets);
108 109 }
109 110 return self->offsets[pos];
110 111 }
111 112
112 113 return (const char *)(self->buf.buf) + pos * v1_hdrsize;
113 114 }
114 115
115 116 static inline int index_get_parents(indexObject *self, Py_ssize_t rev,
116 117 int *ps, int maxrev)
117 118 {
118 119 if (rev >= self->length - 1) {
119 120 PyObject *tuple = PyList_GET_ITEM(self->added,
120 121 rev - self->length + 1);
121 122 ps[0] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 5));
122 123 ps[1] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 6));
123 124 } else {
124 125 const char *data = index_deref(self, rev);
125 126 ps[0] = getbe32(data + 24);
126 127 ps[1] = getbe32(data + 28);
127 128 }
128 129 /* If index file is corrupted, ps[] may point to invalid revisions. So
129 130 * there is a risk of buffer overflow to trust them unconditionally. */
130 131 if (ps[0] > maxrev || ps[1] > maxrev) {
131 132 PyErr_SetString(PyExc_ValueError, "parent out of range");
132 133 return -1;
133 134 }
134 135 return 0;
135 136 }
136 137
137 138
138 139 /*
139 140 * RevlogNG format (all in big endian, data may be inlined):
140 141 * 6 bytes: offset
141 142 * 2 bytes: flags
142 143 * 4 bytes: compressed length
143 144 * 4 bytes: uncompressed length
144 145 * 4 bytes: base revision
145 146 * 4 bytes: link revision
146 147 * 4 bytes: parent 1 revision
147 148 * 4 bytes: parent 2 revision
148 149 * 32 bytes: nodeid (only 20 bytes used)
149 150 */
150 151 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
151 152 {
152 153 uint64_t offset_flags;
153 154 int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
154 155 const char *c_node_id;
155 156 const char *data;
156 157 Py_ssize_t length = index_length(self);
157 158 PyObject *entry;
158 159
159 160 if (pos < 0)
160 161 pos += length;
161 162
162 163 if (pos < 0 || pos >= length) {
163 164 PyErr_SetString(PyExc_IndexError, "revlog index out of range");
164 165 return NULL;
165 166 }
166 167
167 168 if (pos == length - 1) {
168 169 Py_INCREF(nullentry);
169 170 return nullentry;
170 171 }
171 172
172 173 if (pos >= self->length - 1) {
173 174 PyObject *obj;
174 175 obj = PyList_GET_ITEM(self->added, pos - self->length + 1);
175 176 Py_INCREF(obj);
176 177 return obj;
177 178 }
178 179
179 180 if (self->cache) {
180 181 if (self->cache[pos]) {
181 182 Py_INCREF(self->cache[pos]);
182 183 return self->cache[pos];
183 184 }
184 185 } else {
185 186 self->cache = calloc(self->raw_length, sizeof(PyObject *));
186 187 if (self->cache == NULL)
187 188 return PyErr_NoMemory();
188 189 }
189 190
190 191 data = index_deref(self, pos);
191 192 if (data == NULL)
192 193 return NULL;
193 194
194 195 offset_flags = getbe32(data + 4);
195 196 if (pos == 0) /* mask out version number for the first entry */
196 197 offset_flags &= 0xFFFF;
197 198 else {
198 199 uint32_t offset_high = getbe32(data);
199 200 offset_flags |= ((uint64_t)offset_high) << 32;
200 201 }
201 202
202 203 comp_len = getbe32(data + 8);
203 204 uncomp_len = getbe32(data + 12);
204 205 base_rev = getbe32(data + 16);
205 206 link_rev = getbe32(data + 20);
206 207 parent_1 = getbe32(data + 24);
207 208 parent_2 = getbe32(data + 28);
208 209 c_node_id = data + 32;
209 210
210 211 entry = Py_BuildValue(tuple_format, offset_flags, comp_len,
211 212 uncomp_len, base_rev, link_rev,
212 213 parent_1, parent_2, c_node_id, 20);
213 214
214 215 if (entry) {
215 216 PyObject_GC_UnTrack(entry);
216 217 Py_INCREF(entry);
217 218 }
218 219
219 220 self->cache[pos] = entry;
220 221
221 222 return entry;
222 223 }
223 224
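/* Added annotation (not in the original file): index_get() decodes the 64-byte
 * v1 record at the offsets implied by the format comment above:
 *   bytes  0-7   48-bit offset + 16-bit flags, read as getbe32(data) for the
 *                high half and getbe32(data + 4) for the low half; for rev 0
 *                the high bits carry the revlog version, so only the flag
 *                bits are kept (offset_flags &= 0xFFFF)
 *   bytes  8-11  compressed length
 *   bytes 12-15  uncompressed length
 *   bytes 16-19  base revision
 *   bytes 20-23  link revision
 *   bytes 24-27  parent 1 revision
 *   bytes 28-31  parent 2 revision
 *   bytes 32-51  node id (remaining 12 bytes of the 32-byte field unused) */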
224 225 /*
225 226 * Return the 20-byte SHA of the node corresponding to the given rev.
226 227 */
227 228 static const char *index_node(indexObject *self, Py_ssize_t pos)
228 229 {
229 230 Py_ssize_t length = index_length(self);
230 231 const char *data;
231 232
232 233 if (pos == length - 1 || pos == INT_MAX)
233 234 return nullid;
234 235
235 236 if (pos >= length)
236 237 return NULL;
237 238
238 239 if (pos >= self->length - 1) {
239 240 PyObject *tuple, *str;
240 241 tuple = PyList_GET_ITEM(self->added, pos - self->length + 1);
241 242 str = PyTuple_GetItem(tuple, 7);
242 243 return str ? PyBytes_AS_STRING(str) : NULL;
243 244 }
244 245
245 246 data = index_deref(self, pos);
246 247 return data ? data + 32 : NULL;
247 248 }
248 249
249 250 static int nt_insert(indexObject *self, const char *node, int rev);
250 251
251 252 static int node_check(PyObject *obj, char **node, Py_ssize_t *nodelen)
252 253 {
253 254 if (PyBytes_AsStringAndSize(obj, node, nodelen) == -1)
254 255 return -1;
255 256 if (*nodelen == 20)
256 257 return 0;
257 258 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
258 259 return -1;
259 260 }
260 261
261 262 static PyObject *index_insert(indexObject *self, PyObject *args)
262 263 {
263 264 PyObject *obj;
264 265 char *node;
265 266 int index;
266 267 Py_ssize_t len, nodelen;
267 268
268 269 if (!PyArg_ParseTuple(args, "iO", &index, &obj))
269 270 return NULL;
270 271
271 272 if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
272 273 PyErr_SetString(PyExc_TypeError, "8-tuple required");
273 274 return NULL;
274 275 }
275 276
276 277 if (node_check(PyTuple_GET_ITEM(obj, 7), &node, &nodelen) == -1)
277 278 return NULL;
278 279
279 280 len = index_length(self);
280 281
281 282 if (index < 0)
282 283 index += len;
283 284
284 285 if (index != len - 1) {
285 286 PyErr_SetString(PyExc_IndexError,
286 287 "insert only supported at index -1");
287 288 return NULL;
288 289 }
289 290
290 291 if (self->added == NULL) {
291 292 self->added = PyList_New(0);
292 293 if (self->added == NULL)
293 294 return NULL;
294 295 }
295 296
296 297 if (PyList_Append(self->added, obj) == -1)
297 298 return NULL;
298 299
299 300 if (self->nt)
300 301 nt_insert(self, node, index);
301 302
302 303 Py_CLEAR(self->headrevs);
303 304 Py_RETURN_NONE;
304 305 }
305 306
306 307 static void _index_clearcaches(indexObject *self)
307 308 {
308 309 if (self->cache) {
309 310 Py_ssize_t i;
310 311
311 312 for (i = 0; i < self->raw_length; i++)
312 313 Py_CLEAR(self->cache[i]);
313 314 free(self->cache);
314 315 self->cache = NULL;
315 316 }
316 317 if (self->offsets) {
317 318 PyMem_Free(self->offsets);
318 319 self->offsets = NULL;
319 320 }
320 321 if (self->nt) {
321 322 free(self->nt);
322 323 self->nt = NULL;
323 324 }
324 325 Py_CLEAR(self->headrevs);
325 326 }
326 327
327 328 static PyObject *index_clearcaches(indexObject *self)
328 329 {
329 330 _index_clearcaches(self);
330 331 self->ntlength = self->ntcapacity = 0;
331 332 self->ntdepth = self->ntsplits = 0;
332 333 self->ntrev = -1;
333 334 self->ntlookups = self->ntmisses = 0;
334 335 Py_RETURN_NONE;
335 336 }
336 337
337 338 static PyObject *index_stats(indexObject *self)
338 339 {
339 340 PyObject *obj = PyDict_New();
340 341 PyObject *t = NULL;
341 342
342 343 if (obj == NULL)
343 344 return NULL;
344 345
345 346 #define istat(__n, __d) \
346 347 do { \
347 348 t = PyInt_FromSsize_t(self->__n); \
348 349 if (!t) \
349 350 goto bail; \
350 351 if (PyDict_SetItemString(obj, __d, t) == -1) \
351 352 goto bail; \
352 353 Py_DECREF(t); \
353 354 } while (0)
354 355
355 356 if (self->added) {
356 357 Py_ssize_t len = PyList_GET_SIZE(self->added);
357 358 t = PyInt_FromSsize_t(len);
358 359 if (!t)
359 360 goto bail;
360 361 if (PyDict_SetItemString(obj, "index entries added", t) == -1)
361 362 goto bail;
362 363 Py_DECREF(t);
363 364 }
364 365
365 366 if (self->raw_length != self->length - 1)
366 367 istat(raw_length, "revs on disk");
367 368 istat(length, "revs in memory");
368 369 istat(ntcapacity, "node trie capacity");
369 370 istat(ntdepth, "node trie depth");
370 371 istat(ntlength, "node trie count");
371 372 istat(ntlookups, "node trie lookups");
372 373 istat(ntmisses, "node trie misses");
373 374 istat(ntrev, "node trie last rev scanned");
374 375 istat(ntsplits, "node trie splits");
375 376
376 377 #undef istat
377 378
378 379 return obj;
379 380
380 381 bail:
381 382 Py_XDECREF(obj);
382 383 Py_XDECREF(t);
383 384 return NULL;
384 385 }
385 386
386 387 /*
387 388 * When we cache a list, we want to be sure the caller can't mutate
388 389 * the cached copy.
389 390 */
390 391 static PyObject *list_copy(PyObject *list)
391 392 {
392 393 Py_ssize_t len = PyList_GET_SIZE(list);
393 394 PyObject *newlist = PyList_New(len);
394 395 Py_ssize_t i;
395 396
396 397 if (newlist == NULL)
397 398 return NULL;
398 399
399 400 for (i = 0; i < len; i++) {
400 401 PyObject *obj = PyList_GET_ITEM(list, i);
401 402 Py_INCREF(obj);
402 403 PyList_SET_ITEM(newlist, i, obj);
403 404 }
404 405
405 406 return newlist;
406 407 }
407 408
408 409 static int check_filter(PyObject *filter, Py_ssize_t arg) {
409 410 if (filter) {
410 411 PyObject *arglist, *result;
411 412 int isfiltered;
412 413
413 414 arglist = Py_BuildValue("(n)", arg);
414 415 if (!arglist) {
415 416 return -1;
416 417 }
417 418
418 419 result = PyEval_CallObject(filter, arglist);
419 420 Py_DECREF(arglist);
420 421 if (!result) {
421 422 return -1;
422 423 }
423 424
424 425 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
425 426 * same as this function, so we can just return it directly.*/
426 427 isfiltered = PyObject_IsTrue(result);
427 428 Py_DECREF(result);
428 429 return isfiltered;
429 430 } else {
430 431 return 0;
431 432 }
432 433 }
433 434
434 435 static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
435 436 Py_ssize_t marker, char *phases)
436 437 {
437 438 PyObject *iter = NULL;
438 439 PyObject *iter_item = NULL;
439 440 Py_ssize_t min_idx = index_length(self) + 1;
440 441 long iter_item_long;
441 442
442 443 if (PyList_GET_SIZE(list) != 0) {
443 444 iter = PyObject_GetIter(list);
444 445 if (iter == NULL)
445 446 return -2;
446 447 while ((iter_item = PyIter_Next(iter)))
447 448 {
448 449 iter_item_long = PyInt_AS_LONG(iter_item);
449 450 Py_DECREF(iter_item);
450 451 if (iter_item_long < min_idx)
451 452 min_idx = iter_item_long;
452 453 phases[iter_item_long] = marker;
453 454 }
454 455 Py_DECREF(iter);
455 456 }
456 457
457 458 return min_idx;
458 459 }
459 460
460 461 static inline void set_phase_from_parents(char *phases, int parent_1,
461 462 int parent_2, Py_ssize_t i)
462 463 {
463 464 if (parent_1 >= 0 && phases[parent_1] > phases[i])
464 465 phases[i] = phases[parent_1];
465 466 if (parent_2 >= 0 && phases[parent_2] > phases[i])
466 467 phases[i] = phases[parent_2];
467 468 }
468 469
469 470 static PyObject *reachableroots2(indexObject *self, PyObject *args)
470 471 {
471 472
472 473 /* Input */
473 474 long minroot;
474 475 PyObject *includepatharg = NULL;
475 476 int includepath = 0;
476 477 /* heads and roots are lists */
477 478 PyObject *heads = NULL;
478 479 PyObject *roots = NULL;
479 480 PyObject *reachable = NULL;
480 481
481 482 PyObject *val;
482 483 Py_ssize_t len = index_length(self) - 1;
483 484 long revnum;
484 485 Py_ssize_t k;
485 486 Py_ssize_t i;
486 487 Py_ssize_t l;
487 488 int r;
488 489 int parents[2];
489 490
490 491 /* Internal data structure:
491 492 	 * tovisit: array of length len+1 (all revs + nullrev), filled up to lentovisit
492 493 * revstates: array of length len+1 (all revs + nullrev) */
493 494 int *tovisit = NULL;
494 495 long lentovisit = 0;
495 496 enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
496 497 char *revstates = NULL;
497 498
498 499 /* Get arguments */
499 500 if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
500 501 &PyList_Type, &roots,
501 502 &PyBool_Type, &includepatharg))
502 503 goto bail;
503 504
504 505 if (includepatharg == Py_True)
505 506 includepath = 1;
506 507
507 508 /* Initialize return set */
508 509 reachable = PyList_New(0);
509 510 if (reachable == NULL)
510 511 goto bail;
511 512
512 513 /* Initialize internal datastructures */
513 514 tovisit = (int *)malloc((len + 1) * sizeof(int));
514 515 if (tovisit == NULL) {
515 516 PyErr_NoMemory();
516 517 goto bail;
517 518 }
518 519
519 520 revstates = (char *)calloc(len + 1, 1);
520 521 if (revstates == NULL) {
521 522 PyErr_NoMemory();
522 523 goto bail;
523 524 }
524 525
525 526 l = PyList_GET_SIZE(roots);
526 527 for (i = 0; i < l; i++) {
527 528 revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
528 529 if (revnum == -1 && PyErr_Occurred())
529 530 goto bail;
530 531 /* If root is out of range, e.g. wdir(), it must be unreachable
531 532 * from heads. So we can just ignore it. */
532 533 if (revnum + 1 < 0 || revnum + 1 >= len + 1)
533 534 continue;
534 535 revstates[revnum + 1] |= RS_ROOT;
535 536 }
536 537
537 538 /* Populate tovisit with all the heads */
538 539 l = PyList_GET_SIZE(heads);
539 540 for (i = 0; i < l; i++) {
540 541 revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
541 542 if (revnum == -1 && PyErr_Occurred())
542 543 goto bail;
543 544 if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
544 545 PyErr_SetString(PyExc_IndexError, "head out of range");
545 546 goto bail;
546 547 }
547 548 if (!(revstates[revnum + 1] & RS_SEEN)) {
548 549 tovisit[lentovisit++] = (int)revnum;
549 550 revstates[revnum + 1] |= RS_SEEN;
550 551 }
551 552 }
552 553
553 554 /* Visit the tovisit list and find the reachable roots */
554 555 k = 0;
555 556 while (k < lentovisit) {
556 557 /* Add the node to reachable if it is a root*/
557 558 revnum = tovisit[k++];
558 559 if (revstates[revnum + 1] & RS_ROOT) {
559 560 revstates[revnum + 1] |= RS_REACHABLE;
560 561 val = PyInt_FromLong(revnum);
561 562 if (val == NULL)
562 563 goto bail;
563 564 r = PyList_Append(reachable, val);
564 565 Py_DECREF(val);
565 566 if (r < 0)
566 567 goto bail;
567 568 if (includepath == 0)
568 569 continue;
569 570 }
570 571
571 572 /* Add its parents to the list of nodes to visit */
572 573 if (revnum == -1)
573 574 continue;
574 575 r = index_get_parents(self, revnum, parents, (int)len - 1);
575 576 if (r < 0)
576 577 goto bail;
577 578 for (i = 0; i < 2; i++) {
578 579 if (!(revstates[parents[i] + 1] & RS_SEEN)
579 580 && parents[i] >= minroot) {
580 581 tovisit[lentovisit++] = parents[i];
581 582 revstates[parents[i] + 1] |= RS_SEEN;
582 583 }
583 584 }
584 585 }
585 586
586 587 /* Find all the nodes in between the roots we found and the heads
587 588 * and add them to the reachable set */
588 589 if (includepath == 1) {
589 590 long minidx = minroot;
590 591 if (minidx < 0)
591 592 minidx = 0;
592 593 for (i = minidx; i < len; i++) {
593 594 if (!(revstates[i + 1] & RS_SEEN))
594 595 continue;
595 596 r = index_get_parents(self, i, parents, (int)len - 1);
596 597 /* Corrupted index file, error is set from
597 598 * index_get_parents */
598 599 if (r < 0)
599 600 goto bail;
600 601 if (((revstates[parents[0] + 1] |
601 602 revstates[parents[1] + 1]) & RS_REACHABLE)
602 603 && !(revstates[i + 1] & RS_REACHABLE)) {
603 604 revstates[i + 1] |= RS_REACHABLE;
604 605 val = PyInt_FromLong(i);
605 606 if (val == NULL)
606 607 goto bail;
607 608 r = PyList_Append(reachable, val);
608 609 Py_DECREF(val);
609 610 if (r < 0)
610 611 goto bail;
611 612 }
612 613 }
613 614 }
614 615
615 616 free(revstates);
616 617 free(tovisit);
617 618 return reachable;
618 619 bail:
619 620 Py_XDECREF(reachable);
620 621 free(revstates);
621 622 free(tovisit);
622 623 return NULL;
623 624 }
624 625
625 626 static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
626 627 {
627 628 PyObject *roots = Py_None;
628 629 PyObject *ret = NULL;
629 630 PyObject *phaseslist = NULL;
630 631 PyObject *phaseroots = NULL;
631 632 PyObject *phaseset = NULL;
632 633 PyObject *phasessetlist = NULL;
633 634 PyObject *rev = NULL;
634 635 Py_ssize_t len = index_length(self) - 1;
635 636 Py_ssize_t numphase = 0;
636 637 Py_ssize_t minrevallphases = 0;
637 638 Py_ssize_t minrevphase = 0;
638 639 Py_ssize_t i = 0;
639 640 char *phases = NULL;
640 641 long phase;
641 642
642 643 if (!PyArg_ParseTuple(args, "O", &roots))
643 644 goto done;
644 645 if (roots == NULL || !PyList_Check(roots))
645 646 goto done;
646 647
647 648 phases = calloc(len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
648 649 if (phases == NULL) {
649 650 PyErr_NoMemory();
650 651 goto done;
651 652 }
652 653 /* Put the phase information of all the roots in phases */
653 654 numphase = PyList_GET_SIZE(roots)+1;
654 655 minrevallphases = len + 1;
655 656 phasessetlist = PyList_New(numphase);
656 657 if (phasessetlist == NULL)
657 658 goto done;
658 659
659 660 PyList_SET_ITEM(phasessetlist, 0, Py_None);
660 661 Py_INCREF(Py_None);
661 662
662 663 for (i = 0; i < numphase-1; i++) {
663 664 phaseroots = PyList_GET_ITEM(roots, i);
664 665 phaseset = PySet_New(NULL);
665 666 if (phaseset == NULL)
666 667 goto release;
667 668 PyList_SET_ITEM(phasessetlist, i+1, phaseset);
668 669 if (!PyList_Check(phaseroots))
669 670 goto release;
670 671 minrevphase = add_roots_get_min(self, phaseroots, i+1, phases);
671 672 if (minrevphase == -2) /* Error from add_roots_get_min */
672 673 goto release;
673 674 minrevallphases = MIN(minrevallphases, minrevphase);
674 675 }
675 676 /* Propagate the phase information from the roots to the revs */
676 677 if (minrevallphases != -1) {
677 678 int parents[2];
678 679 for (i = minrevallphases; i < len; i++) {
679 680 if (index_get_parents(self, i, parents,
680 681 (int)len - 1) < 0)
681 682 goto release;
682 683 set_phase_from_parents(phases, parents[0], parents[1], i);
683 684 }
684 685 }
685 686 /* Transform phase list to a python list */
686 687 phaseslist = PyList_New(len);
687 688 if (phaseslist == NULL)
688 689 goto release;
689 690 for (i = 0; i < len; i++) {
690 691 PyObject *phaseval;
691 692
692 693 phase = phases[i];
693 694 /* We only store the sets of phase for non public phase, the public phase
694 695 * is computed as a difference */
695 696 if (phase != 0) {
696 697 phaseset = PyList_GET_ITEM(phasessetlist, phase);
697 698 rev = PyInt_FromLong(i);
698 699 if (rev == NULL)
699 700 goto release;
700 701 PySet_Add(phaseset, rev);
701 702 Py_XDECREF(rev);
702 703 }
703 704 phaseval = PyInt_FromLong(phase);
704 705 if (phaseval == NULL)
705 706 goto release;
706 707 PyList_SET_ITEM(phaseslist, i, phaseval);
707 708 }
708 709 ret = PyTuple_Pack(2, phaseslist, phasessetlist);
709 710
710 711 release:
711 712 Py_XDECREF(phaseslist);
712 713 Py_XDECREF(phasessetlist);
713 714 done:
714 715 free(phases);
715 716 return ret;
716 717 }
717 718
718 719 static PyObject *index_headrevs(indexObject *self, PyObject *args)
719 720 {
720 721 Py_ssize_t i, j, len;
721 722 char *nothead = NULL;
722 723 PyObject *heads = NULL;
723 724 PyObject *filter = NULL;
724 725 PyObject *filteredrevs = Py_None;
725 726
726 727 if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
727 728 return NULL;
728 729 }
729 730
730 731 if (self->headrevs && filteredrevs == self->filteredrevs)
731 732 return list_copy(self->headrevs);
732 733
733 734 Py_DECREF(self->filteredrevs);
734 735 self->filteredrevs = filteredrevs;
735 736 Py_INCREF(filteredrevs);
736 737
737 738 if (filteredrevs != Py_None) {
738 739 filter = PyObject_GetAttrString(filteredrevs, "__contains__");
739 740 if (!filter) {
740 741 PyErr_SetString(PyExc_TypeError,
741 742 "filteredrevs has no attribute __contains__");
742 743 goto bail;
743 744 }
744 745 }
745 746
746 747 len = index_length(self) - 1;
747 748 heads = PyList_New(0);
748 749 if (heads == NULL)
749 750 goto bail;
750 751 if (len == 0) {
751 752 PyObject *nullid = PyInt_FromLong(-1);
752 753 if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
753 754 Py_XDECREF(nullid);
754 755 goto bail;
755 756 }
756 757 goto done;
757 758 }
758 759
759 760 nothead = calloc(len, 1);
760 761 if (nothead == NULL) {
761 762 PyErr_NoMemory();
762 763 goto bail;
763 764 }
764 765
765 766 for (i = len - 1; i >= 0; i--) {
766 767 int isfiltered;
767 768 int parents[2];
768 769
769 770 /* If nothead[i] == 1, it means we've seen an unfiltered child of this
770 771 * node already, and therefore this node is not filtered. So we can skip
771 772 * the expensive check_filter step.
772 773 */
773 774 if (nothead[i] != 1) {
774 775 isfiltered = check_filter(filter, i);
775 776 if (isfiltered == -1) {
776 777 PyErr_SetString(PyExc_TypeError,
777 778 "unable to check filter");
778 779 goto bail;
779 780 }
780 781
781 782 if (isfiltered) {
782 783 nothead[i] = 1;
783 784 continue;
784 785 }
785 786 }
786 787
787 788 if (index_get_parents(self, i, parents, (int)len - 1) < 0)
788 789 goto bail;
789 790 for (j = 0; j < 2; j++) {
790 791 if (parents[j] >= 0)
791 792 nothead[parents[j]] = 1;
792 793 }
793 794 }
794 795
795 796 for (i = 0; i < len; i++) {
796 797 PyObject *head;
797 798
798 799 if (nothead[i])
799 800 continue;
800 801 head = PyInt_FromSsize_t(i);
801 802 if (head == NULL || PyList_Append(heads, head) == -1) {
802 803 Py_XDECREF(head);
803 804 goto bail;
804 805 }
805 806 }
806 807
807 808 done:
808 809 self->headrevs = heads;
809 810 Py_XDECREF(filter);
810 811 free(nothead);
811 812 return list_copy(self->headrevs);
812 813 bail:
813 814 Py_XDECREF(filter);
814 815 Py_XDECREF(heads);
815 816 free(nothead);
816 817 return NULL;
817 818 }
818 819
820 /**
821 * Obtain the base revision index entry.
822 *
823 * Callers must ensure that rev >= 0 or illegal memory access may occur.
824 */
819 825 static inline int index_baserev(indexObject *self, int rev)
820 826 {
821 827 const char *data;
822 828
823 829 if (rev >= self->length - 1) {
824 830 PyObject *tuple = PyList_GET_ITEM(self->added,
825 831 rev - self->length + 1);
826 832 return (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 3));
827 833 }
828 834 else {
829 835 data = index_deref(self, rev);
830 836 if (data == NULL) {
831 837 return -2;
832 838 }
833 839
834 840 return getbe32(data + 16);
835 841 }
836 842 }
837 843
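/* Added annotation (not in the original source): index_baserev() returns the
 * base revision field (byte offset 16 on disk, tuple index 3 for in-memory
 * entries). A return value of -2 means index_deref() failed and has already
 * set a Python exception, which is why the deltachain code below only asserts
 * PyErr_Occurred(). In revlog terms, a revision whose base equals itself
 * stores a full snapshot rather than a delta, and that equality is what
 * terminates the delta chain walk. */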
838 844 static PyObject *index_deltachain(indexObject *self, PyObject *args)
839 845 {
840 846 int rev, generaldelta;
841 847 PyObject *stoparg;
842 848 int stoprev, iterrev, baserev = -1;
843 849 int stopped;
844 PyObject *chain = NULL, *value = NULL, *result = NULL;
850 PyObject *chain = NULL, *result = NULL;
845 851 const Py_ssize_t length = index_length(self);
846 852
847 853 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
848 854 return NULL;
849 855 }
850 856
851 857 if (PyInt_Check(stoparg)) {
852 858 stoprev = (int)PyInt_AsLong(stoparg);
853 859 if (stoprev == -1 && PyErr_Occurred()) {
854 860 return NULL;
855 861 }
856 862 }
857 863 else if (stoparg == Py_None) {
858 864 stoprev = -2;
859 865 }
860 866 else {
861 867 PyErr_SetString(PyExc_ValueError,
862 868 "stoprev must be integer or None");
863 869 return NULL;
864 870 }
865 871
866 872 if (rev < 0 || rev >= length - 1) {
867 873 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
868 874 return NULL;
869 875 }
870 876
871 877 chain = PyList_New(0);
872 878 if (chain == NULL) {
873 879 return NULL;
874 880 }
875 881
876 882 baserev = index_baserev(self, rev);
877 883
878 884 /* This should never happen. */
879 if (baserev == -2) {
880 PyErr_SetString(PyExc_IndexError, "unable to resolve data");
885 if (baserev <= -2) {
886 /* Error should be set by index_deref() */
887 assert(PyErr_Occurred());
881 888 goto bail;
882 889 }
883 890
884 891 iterrev = rev;
885 892
886 893 while (iterrev != baserev && iterrev != stoprev) {
887 value = PyInt_FromLong(iterrev);
894 PyObject *value = PyInt_FromLong(iterrev);
888 895 if (value == NULL) {
889 896 goto bail;
890 897 }
891 898 if (PyList_Append(chain, value)) {
892 899 Py_DECREF(value);
893 900 goto bail;
894 901 }
895 902 Py_DECREF(value);
896 903
897 904 if (generaldelta) {
898 905 iterrev = baserev;
899 906 }
900 907 else {
901 908 iterrev--;
902 909 }
903 910
904 911 if (iterrev < 0) {
905 912 break;
906 913 }
907 914
908 915 if (iterrev >= length - 1) {
909 916 PyErr_SetString(PyExc_IndexError, "revision outside index");
910 917 return NULL;
911 918 }
912 919
913 920 baserev = index_baserev(self, iterrev);
914 921
915 922 /* This should never happen. */
916 if (baserev == -2) {
917 PyErr_SetString(PyExc_IndexError, "unable to resolve data");
923 if (baserev <= -2) {
924 /* Error should be set by index_deref() */
925 assert(PyErr_Occurred());
918 926 goto bail;
919 927 }
920 928 }
921 929
922 930 if (iterrev == stoprev) {
923 931 stopped = 1;
924 932 }
925 933 else {
926 value = PyInt_FromLong(iterrev);
934 PyObject *value = PyInt_FromLong(iterrev);
927 935 if (value == NULL) {
928 936 goto bail;
929 937 }
930 938 if (PyList_Append(chain, value)) {
931 939 Py_DECREF(value);
932 940 goto bail;
933 941 }
934 942 Py_DECREF(value);
935 943
936 944 stopped = 0;
937 945 }
938 946
939 947 if (PyList_Reverse(chain)) {
940 948 goto bail;
941 949 }
942 950
943 951 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
944 952 Py_DECREF(chain);
945 953 return result;
946 954
947 955 bail:
948 956 Py_DECREF(chain);
949 957 return NULL;
950 958 }
951 959
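/* Worked example (hypothetical revlog, added for illustration): with
 * generaldelta enabled, suppose rev 5 has base revision 3 and rev 3 is a full
 * snapshot (base == 3). deltachain(5, None, 1) appends 5, follows the base to
 * 3, exits the loop because iterrev == baserev, appends 3, and returns
 * ([3, 5], False). With stoprev=3 it returns ([5], True) instead, and with
 * generaldelta disabled the walk steps through rev - 1 rather than jumping to
 * the base revision. */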
952 960 static inline int nt_level(const char *node, Py_ssize_t level)
953 961 {
954 962 int v = node[level>>1];
955 963 if (!(level & 1))
956 964 v >>= 4;
957 965 return v & 0xf;
958 966 }
959 967
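/* Example (added annotation): nt_level() extracts one hex nybble from a
 * binary node id. If node[0] == 0x4f, level 0 yields 0x4 (high nybble) and
 * level 1 yields 0xf (low nybble); levels 2 and 3 come from node[1], and so
 * on up to 40 nybbles for a 20-byte hash. */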
960 968 /*
961 969 * Return values:
962 970 *
963 971 * -4: match is ambiguous (multiple candidates)
964 972 * -2: not found
965 973 * rest: valid rev
966 974 */
967 975 static int nt_find(indexObject *self, const char *node, Py_ssize_t nodelen,
968 976 int hex)
969 977 {
970 978 int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
971 979 int level, maxlevel, off;
972 980
973 981 if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
974 982 return -1;
975 983
976 984 if (self->nt == NULL)
977 985 return -2;
978 986
979 987 if (hex)
980 988 maxlevel = nodelen > 40 ? 40 : (int)nodelen;
981 989 else
982 990 maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);
983 991
984 992 for (level = off = 0; level < maxlevel; level++) {
985 993 int k = getnybble(node, level);
986 994 nodetree *n = &self->nt[off];
987 995 int v = n->children[k];
988 996
989 997 if (v < 0) {
990 998 const char *n;
991 999 Py_ssize_t i;
992 1000
993 1001 v = -(v + 1);
994 1002 n = index_node(self, v);
995 1003 if (n == NULL)
996 1004 return -2;
997 1005 for (i = level; i < maxlevel; i++)
998 1006 if (getnybble(node, i) != nt_level(n, i))
999 1007 return -2;
1000 1008 return v;
1001 1009 }
1002 1010 if (v == 0)
1003 1011 return -2;
1004 1012 off = v;
1005 1013 }
1006 1014 /* multiple matches against an ambiguous prefix */
1007 1015 return -4;
1008 1016 }
1009 1017
1010 1018 static int nt_new(indexObject *self)
1011 1019 {
1012 1020 if (self->ntlength == self->ntcapacity) {
1013 1021 if (self->ntcapacity >= INT_MAX / (sizeof(nodetree) * 2)) {
1014 1022 PyErr_SetString(PyExc_MemoryError,
1015 1023 "overflow in nt_new");
1016 1024 return -1;
1017 1025 }
1018 1026 self->ntcapacity *= 2;
1019 1027 self->nt = realloc(self->nt,
1020 1028 self->ntcapacity * sizeof(nodetree));
1021 1029 if (self->nt == NULL) {
1022 1030 PyErr_SetString(PyExc_MemoryError, "out of memory");
1023 1031 return -1;
1024 1032 }
1025 1033 memset(&self->nt[self->ntlength], 0,
1026 1034 sizeof(nodetree) * (self->ntcapacity - self->ntlength));
1027 1035 }
1028 1036 return self->ntlength++;
1029 1037 }
1030 1038
1031 1039 static int nt_insert(indexObject *self, const char *node, int rev)
1032 1040 {
1033 1041 int level = 0;
1034 1042 int off = 0;
1035 1043
1036 1044 while (level < 40) {
1037 1045 int k = nt_level(node, level);
1038 1046 nodetree *n;
1039 1047 int v;
1040 1048
1041 1049 n = &self->nt[off];
1042 1050 v = n->children[k];
1043 1051
1044 1052 if (v == 0) {
1045 1053 n->children[k] = -rev - 1;
1046 1054 return 0;
1047 1055 }
1048 1056 if (v < 0) {
1049 1057 const char *oldnode = index_node(self, -(v + 1));
1050 1058 int noff;
1051 1059
1052 1060 if (!oldnode || !memcmp(oldnode, node, 20)) {
1053 1061 n->children[k] = -rev - 1;
1054 1062 return 0;
1055 1063 }
1056 1064 noff = nt_new(self);
1057 1065 if (noff == -1)
1058 1066 return -1;
1059 1067 /* self->nt may have been changed by realloc */
1060 1068 self->nt[off].children[k] = noff;
1061 1069 off = noff;
1062 1070 n = &self->nt[off];
1063 1071 n->children[nt_level(oldnode, ++level)] = v;
1064 1072 if (level > self->ntdepth)
1065 1073 self->ntdepth = level;
1066 1074 self->ntsplits += 1;
1067 1075 } else {
1068 1076 level += 1;
1069 1077 off = v;
1070 1078 }
1071 1079 }
1072 1080
1073 1081 return -1;
1074 1082 }
1075 1083
1076 1084 static int nt_init(indexObject *self)
1077 1085 {
1078 1086 if (self->nt == NULL) {
1079 1087 if ((size_t)self->raw_length > INT_MAX / sizeof(nodetree)) {
1080 1088 PyErr_SetString(PyExc_ValueError, "overflow in nt_init");
1081 1089 return -1;
1082 1090 }
1083 1091 self->ntcapacity = self->raw_length < 4
1084 1092 ? 4 : (int)self->raw_length / 2;
1085 1093
1086 1094 self->nt = calloc(self->ntcapacity, sizeof(nodetree));
1087 1095 if (self->nt == NULL) {
1088 1096 PyErr_NoMemory();
1089 1097 return -1;
1090 1098 }
1091 1099 self->ntlength = 1;
1092 1100 self->ntrev = (int)index_length(self) - 1;
1093 1101 self->ntlookups = 1;
1094 1102 self->ntmisses = 0;
1095 1103 if (nt_insert(self, nullid, INT_MAX) == -1)
1096 1104 return -1;
1097 1105 }
1098 1106 return 0;
1099 1107 }
1100 1108
1101 1109 /*
1102 1110 * Return values:
1103 1111 *
1104 1112 * -3: error (exception set)
1105 1113 * -2: not found (no exception set)
1106 1114 * rest: valid rev
1107 1115 */
1108 1116 static int index_find_node(indexObject *self,
1109 1117 const char *node, Py_ssize_t nodelen)
1110 1118 {
1111 1119 int rev;
1112 1120
1113 1121 self->ntlookups++;
1114 1122 rev = nt_find(self, node, nodelen, 0);
1115 1123 if (rev >= -1)
1116 1124 return rev;
1117 1125
1118 1126 if (nt_init(self) == -1)
1119 1127 return -3;
1120 1128
1121 1129 /*
1122 1130 * For the first handful of lookups, we scan the entire index,
1123 1131 * and cache only the matching nodes. This optimizes for cases
1124 1132 * like "hg tip", where only a few nodes are accessed.
1125 1133 *
1126 1134 * After that, we cache every node we visit, using a single
1127 1135 * scan amortized over multiple lookups. This gives the best
1128 1136 * bulk performance, e.g. for "hg log".
1129 1137 */
1130 1138 if (self->ntmisses++ < 4) {
1131 1139 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1132 1140 const char *n = index_node(self, rev);
1133 1141 if (n == NULL)
1134 1142 return -2;
1135 1143 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1136 1144 if (nt_insert(self, n, rev) == -1)
1137 1145 return -3;
1138 1146 break;
1139 1147 }
1140 1148 }
1141 1149 } else {
1142 1150 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1143 1151 const char *n = index_node(self, rev);
1144 1152 if (n == NULL) {
1145 1153 self->ntrev = rev + 1;
1146 1154 return -2;
1147 1155 }
1148 1156 if (nt_insert(self, n, rev) == -1) {
1149 1157 self->ntrev = rev + 1;
1150 1158 return -3;
1151 1159 }
1152 1160 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1153 1161 break;
1154 1162 }
1155 1163 }
1156 1164 self->ntrev = rev;
1157 1165 }
1158 1166
1159 1167 if (rev >= 0)
1160 1168 return rev;
1161 1169 return -2;
1162 1170 }
1163 1171
1164 1172 static void raise_revlog_error(void)
1165 1173 {
1166 1174 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
1167 1175
1168 1176 mod = PyImport_ImportModule("mercurial.error");
1169 1177 if (mod == NULL) {
1170 1178 goto cleanup;
1171 1179 }
1172 1180
1173 1181 dict = PyModule_GetDict(mod);
1174 1182 if (dict == NULL) {
1175 1183 goto cleanup;
1176 1184 }
1177 1185 Py_INCREF(dict);
1178 1186
1179 1187 errclass = PyDict_GetItemString(dict, "RevlogError");
1180 1188 if (errclass == NULL) {
1181 1189 PyErr_SetString(PyExc_SystemError,
1182 1190 "could not find RevlogError");
1183 1191 goto cleanup;
1184 1192 }
1185 1193
1186 1194 /* value of exception is ignored by callers */
1187 1195 PyErr_SetString(errclass, "RevlogError");
1188 1196
1189 1197 cleanup:
1190 1198 Py_XDECREF(dict);
1191 1199 Py_XDECREF(mod);
1192 1200 }
1193 1201
1194 1202 static PyObject *index_getitem(indexObject *self, PyObject *value)
1195 1203 {
1196 1204 char *node;
1197 1205 Py_ssize_t nodelen;
1198 1206 int rev;
1199 1207
1200 1208 if (PyInt_Check(value))
1201 1209 return index_get(self, PyInt_AS_LONG(value));
1202 1210
1203 1211 if (node_check(value, &node, &nodelen) == -1)
1204 1212 return NULL;
1205 1213 rev = index_find_node(self, node, nodelen);
1206 1214 if (rev >= -1)
1207 1215 return PyInt_FromLong(rev);
1208 1216 if (rev == -2)
1209 1217 raise_revlog_error();
1210 1218 return NULL;
1211 1219 }
1212 1220
1213 1221 static int nt_partialmatch(indexObject *self, const char *node,
1214 1222 Py_ssize_t nodelen)
1215 1223 {
1216 1224 int rev;
1217 1225
1218 1226 if (nt_init(self) == -1)
1219 1227 return -3;
1220 1228
1221 1229 if (self->ntrev > 0) {
1222 1230 /* ensure that the radix tree is fully populated */
1223 1231 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1224 1232 const char *n = index_node(self, rev);
1225 1233 if (n == NULL)
1226 1234 return -2;
1227 1235 if (nt_insert(self, n, rev) == -1)
1228 1236 return -3;
1229 1237 }
1230 1238 self->ntrev = rev;
1231 1239 }
1232 1240
1233 1241 return nt_find(self, node, nodelen, 1);
1234 1242 }
1235 1243
1236 1244 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
1237 1245 {
1238 1246 const char *fullnode;
1239 1247 int nodelen;
1240 1248 char *node;
1241 1249 int rev, i;
1242 1250
1243 1251 if (!PyArg_ParseTuple(args, "s#", &node, &nodelen))
1244 1252 return NULL;
1245 1253
1246 1254 if (nodelen < 4) {
1247 1255 PyErr_SetString(PyExc_ValueError, "key too short");
1248 1256 return NULL;
1249 1257 }
1250 1258
1251 1259 if (nodelen > 40) {
1252 1260 PyErr_SetString(PyExc_ValueError, "key too long");
1253 1261 return NULL;
1254 1262 }
1255 1263
1256 1264 for (i = 0; i < nodelen; i++)
1257 1265 hexdigit(node, i);
1258 1266 if (PyErr_Occurred()) {
1259 1267 /* input contains non-hex characters */
1260 1268 PyErr_Clear();
1261 1269 Py_RETURN_NONE;
1262 1270 }
1263 1271
1264 1272 rev = nt_partialmatch(self, node, nodelen);
1265 1273
1266 1274 switch (rev) {
1267 1275 case -4:
1268 1276 raise_revlog_error();
1269 1277 case -3:
1270 1278 return NULL;
1271 1279 case -2:
1272 1280 Py_RETURN_NONE;
1273 1281 case -1:
1274 1282 return PyBytes_FromStringAndSize(nullid, 20);
1275 1283 }
1276 1284
1277 1285 fullnode = index_node(self, rev);
1278 1286 if (fullnode == NULL) {
1279 1287 PyErr_Format(PyExc_IndexError,
1280 1288 "could not access rev %d", rev);
1281 1289 return NULL;
1282 1290 }
1283 1291 return PyBytes_FromStringAndSize(fullnode, 20);
1284 1292 }
1285 1293
1286 1294 static PyObject *index_m_get(indexObject *self, PyObject *args)
1287 1295 {
1288 1296 Py_ssize_t nodelen;
1289 1297 PyObject *val;
1290 1298 char *node;
1291 1299 int rev;
1292 1300
1293 1301 if (!PyArg_ParseTuple(args, "O", &val))
1294 1302 return NULL;
1295 1303 if (node_check(val, &node, &nodelen) == -1)
1296 1304 return NULL;
1297 1305 rev = index_find_node(self, node, nodelen);
1298 1306 if (rev == -3)
1299 1307 return NULL;
1300 1308 if (rev == -2)
1301 1309 Py_RETURN_NONE;
1302 1310 return PyInt_FromLong(rev);
1303 1311 }
1304 1312
1305 1313 static int index_contains(indexObject *self, PyObject *value)
1306 1314 {
1307 1315 char *node;
1308 1316 Py_ssize_t nodelen;
1309 1317
1310 1318 if (PyInt_Check(value)) {
1311 1319 long rev = PyInt_AS_LONG(value);
1312 1320 return rev >= -1 && rev < index_length(self);
1313 1321 }
1314 1322
1315 1323 if (node_check(value, &node, &nodelen) == -1)
1316 1324 return -1;
1317 1325
1318 1326 switch (index_find_node(self, node, nodelen)) {
1319 1327 case -3:
1320 1328 return -1;
1321 1329 case -2:
1322 1330 return 0;
1323 1331 default:
1324 1332 return 1;
1325 1333 }
1326 1334 }
1327 1335
1328 1336 typedef uint64_t bitmask;
1329 1337
1330 1338 /*
1331 1339 * Given a disjoint set of revs, return all candidates for the
1332 1340 * greatest common ancestor. In revset notation, this is the set
1333 1341 * "heads(::a and ::b and ...)"
1334 1342 */
1335 1343 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
1336 1344 int revcount)
1337 1345 {
1338 1346 const bitmask allseen = (1ull << revcount) - 1;
1339 1347 const bitmask poison = 1ull << revcount;
1340 1348 PyObject *gca = PyList_New(0);
1341 1349 int i, v, interesting;
1342 1350 int maxrev = -1;
1343 1351 bitmask sp;
1344 1352 bitmask *seen;
1345 1353
1346 1354 if (gca == NULL)
1347 1355 return PyErr_NoMemory();
1348 1356
1349 1357 for (i = 0; i < revcount; i++) {
1350 1358 if (revs[i] > maxrev)
1351 1359 maxrev = revs[i];
1352 1360 }
1353 1361
1354 1362 seen = calloc(sizeof(*seen), maxrev + 1);
1355 1363 if (seen == NULL) {
1356 1364 Py_DECREF(gca);
1357 1365 return PyErr_NoMemory();
1358 1366 }
1359 1367
1360 1368 for (i = 0; i < revcount; i++)
1361 1369 seen[revs[i]] = 1ull << i;
1362 1370
1363 1371 interesting = revcount;
1364 1372
1365 1373 for (v = maxrev; v >= 0 && interesting; v--) {
1366 1374 bitmask sv = seen[v];
1367 1375 int parents[2];
1368 1376
1369 1377 if (!sv)
1370 1378 continue;
1371 1379
1372 1380 if (sv < poison) {
1373 1381 interesting -= 1;
1374 1382 if (sv == allseen) {
1375 1383 PyObject *obj = PyInt_FromLong(v);
1376 1384 if (obj == NULL)
1377 1385 goto bail;
1378 1386 if (PyList_Append(gca, obj) == -1) {
1379 1387 Py_DECREF(obj);
1380 1388 goto bail;
1381 1389 }
1382 1390 sv |= poison;
1383 1391 for (i = 0; i < revcount; i++) {
1384 1392 if (revs[i] == v)
1385 1393 goto done;
1386 1394 }
1387 1395 }
1388 1396 }
1389 1397 if (index_get_parents(self, v, parents, maxrev) < 0)
1390 1398 goto bail;
1391 1399
1392 1400 for (i = 0; i < 2; i++) {
1393 1401 int p = parents[i];
1394 1402 if (p == -1)
1395 1403 continue;
1396 1404 sp = seen[p];
1397 1405 if (sv < poison) {
1398 1406 if (sp == 0) {
1399 1407 seen[p] = sv;
1400 1408 interesting++;
1401 1409 }
1402 1410 else if (sp != sv)
1403 1411 seen[p] |= sv;
1404 1412 } else {
1405 1413 if (sp && sp < poison)
1406 1414 interesting--;
1407 1415 seen[p] = sv;
1408 1416 }
1409 1417 }
1410 1418 }
1411 1419
1412 1420 done:
1413 1421 free(seen);
1414 1422 return gca;
1415 1423 bail:
1416 1424 free(seen);
1417 1425 Py_XDECREF(gca);
1418 1426 return NULL;
1419 1427 }
1420 1428
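/* Worked example (added annotation): for two input revs the masks are 01 and
 * 10, allseen is 11 and poison is 100. Walking from maxrev downward, each rev
 * ORs its mask into its parents; the first ancestor whose mask reaches 11 is
 * reachable from both inputs and is appended to gca, and the poison bit then
 * propagates to its own ancestors so they are not reported as extra
 * candidates. */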
1421 1429 /*
1422 1430 * Given a disjoint set of revs, return the subset with the longest
1423 1431 * path to the root.
1424 1432 */
1425 1433 static PyObject *find_deepest(indexObject *self, PyObject *revs)
1426 1434 {
1427 1435 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
1428 1436 static const Py_ssize_t capacity = 24;
1429 1437 int *depth, *interesting = NULL;
1430 1438 int i, j, v, ninteresting;
1431 1439 PyObject *dict = NULL, *keys = NULL;
1432 1440 long *seen = NULL;
1433 1441 int maxrev = -1;
1434 1442 long final;
1435 1443
1436 1444 if (revcount > capacity) {
1437 1445 PyErr_Format(PyExc_OverflowError,
1438 1446 "bitset size (%ld) > capacity (%ld)",
1439 1447 (long)revcount, (long)capacity);
1440 1448 return NULL;
1441 1449 }
1442 1450
1443 1451 for (i = 0; i < revcount; i++) {
1444 1452 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
1445 1453 if (n > maxrev)
1446 1454 maxrev = n;
1447 1455 }
1448 1456
1449 1457 depth = calloc(sizeof(*depth), maxrev + 1);
1450 1458 if (depth == NULL)
1451 1459 return PyErr_NoMemory();
1452 1460
1453 1461 seen = calloc(sizeof(*seen), maxrev + 1);
1454 1462 if (seen == NULL) {
1455 1463 PyErr_NoMemory();
1456 1464 goto bail;
1457 1465 }
1458 1466
1459 1467 interesting = calloc(sizeof(*interesting), 2 << revcount);
1460 1468 if (interesting == NULL) {
1461 1469 PyErr_NoMemory();
1462 1470 goto bail;
1463 1471 }
1464 1472
1465 1473 if (PyList_Sort(revs) == -1)
1466 1474 goto bail;
1467 1475
1468 1476 for (i = 0; i < revcount; i++) {
1469 1477 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
1470 1478 long b = 1l << i;
1471 1479 depth[n] = 1;
1472 1480 seen[n] = b;
1473 1481 interesting[b] = 1;
1474 1482 }
1475 1483
1476 1484 ninteresting = (int)revcount;
1477 1485
1478 1486 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
1479 1487 int dv = depth[v];
1480 1488 int parents[2];
1481 1489 long sv;
1482 1490
1483 1491 if (dv == 0)
1484 1492 continue;
1485 1493
1486 1494 sv = seen[v];
1487 1495 if (index_get_parents(self, v, parents, maxrev) < 0)
1488 1496 goto bail;
1489 1497
1490 1498 for (i = 0; i < 2; i++) {
1491 1499 int p = parents[i];
1492 1500 long sp;
1493 1501 int dp;
1494 1502
1495 1503 if (p == -1)
1496 1504 continue;
1497 1505
1498 1506 dp = depth[p];
1499 1507 sp = seen[p];
1500 1508 if (dp <= dv) {
1501 1509 depth[p] = dv + 1;
1502 1510 if (sp != sv) {
1503 1511 interesting[sv] += 1;
1504 1512 seen[p] = sv;
1505 1513 if (sp) {
1506 1514 interesting[sp] -= 1;
1507 1515 if (interesting[sp] == 0)
1508 1516 ninteresting -= 1;
1509 1517 }
1510 1518 }
1511 1519 }
1512 1520 else if (dv == dp - 1) {
1513 1521 long nsp = sp | sv;
1514 1522 if (nsp == sp)
1515 1523 continue;
1516 1524 seen[p] = nsp;
1517 1525 interesting[sp] -= 1;
1518 1526 if (interesting[sp] == 0 && interesting[nsp] > 0)
1519 1527 ninteresting -= 1;
1520 1528 interesting[nsp] += 1;
1521 1529 }
1522 1530 }
1523 1531 interesting[sv] -= 1;
1524 1532 if (interesting[sv] == 0)
1525 1533 ninteresting -= 1;
1526 1534 }
1527 1535
1528 1536 final = 0;
1529 1537 j = ninteresting;
1530 1538 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
1531 1539 if (interesting[i] == 0)
1532 1540 continue;
1533 1541 final |= i;
1534 1542 j -= 1;
1535 1543 }
1536 1544 if (final == 0) {
1537 1545 keys = PyList_New(0);
1538 1546 goto bail;
1539 1547 }
1540 1548
1541 1549 dict = PyDict_New();
1542 1550 if (dict == NULL)
1543 1551 goto bail;
1544 1552
1545 1553 for (i = 0; i < revcount; i++) {
1546 1554 PyObject *key;
1547 1555
1548 1556 if ((final & (1 << i)) == 0)
1549 1557 continue;
1550 1558
1551 1559 key = PyList_GET_ITEM(revs, i);
1552 1560 Py_INCREF(key);
1553 1561 Py_INCREF(Py_None);
1554 1562 if (PyDict_SetItem(dict, key, Py_None) == -1) {
1555 1563 Py_DECREF(key);
1556 1564 Py_DECREF(Py_None);
1557 1565 goto bail;
1558 1566 }
1559 1567 }
1560 1568
1561 1569 keys = PyDict_Keys(dict);
1562 1570
1563 1571 bail:
1564 1572 free(depth);
1565 1573 free(seen);
1566 1574 free(interesting);
1567 1575 Py_XDECREF(dict);
1568 1576
1569 1577 return keys;
1570 1578 }
1571 1579
1572 1580 /*
1573 1581 * Given a (possibly overlapping) set of revs, return all the
1574 1582 * common ancestors heads: heads(::args[0] and ::a[1] and ...)
1575 1583 */
1576 1584 static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
1577 1585 {
1578 1586 PyObject *ret = NULL;
1579 1587 Py_ssize_t argcount, i, len;
1580 1588 bitmask repeat = 0;
1581 1589 int revcount = 0;
1582 1590 int *revs;
1583 1591
1584 1592 argcount = PySequence_Length(args);
1585 1593 revs = PyMem_Malloc(argcount * sizeof(*revs));
1586 1594 if (argcount > 0 && revs == NULL)
1587 1595 return PyErr_NoMemory();
1588 1596 len = index_length(self) - 1;
1589 1597
1590 1598 for (i = 0; i < argcount; i++) {
1591 1599 static const int capacity = 24;
1592 1600 PyObject *obj = PySequence_GetItem(args, i);
1593 1601 bitmask x;
1594 1602 long val;
1595 1603
1596 1604 if (!PyInt_Check(obj)) {
1597 1605 PyErr_SetString(PyExc_TypeError,
1598 1606 "arguments must all be ints");
1599 1607 Py_DECREF(obj);
1600 1608 goto bail;
1601 1609 }
1602 1610 val = PyInt_AsLong(obj);
1603 1611 Py_DECREF(obj);
1604 1612 if (val == -1) {
1605 1613 ret = PyList_New(0);
1606 1614 goto done;
1607 1615 }
1608 1616 if (val < 0 || val >= len) {
1609 1617 PyErr_SetString(PyExc_IndexError,
1610 1618 "index out of range");
1611 1619 goto bail;
1612 1620 }
1613 1621 /* this cheesy bloom filter lets us avoid some more
1614 1622 * expensive duplicate checks in the common set-is-disjoint
1615 1623 * case */
1616 1624 x = 1ull << (val & 0x3f);
1617 1625 if (repeat & x) {
1618 1626 int k;
1619 1627 for (k = 0; k < revcount; k++) {
1620 1628 if (val == revs[k])
1621 1629 goto duplicate;
1622 1630 }
1623 1631 }
1624 1632 else repeat |= x;
1625 1633 if (revcount >= capacity) {
1626 1634 PyErr_Format(PyExc_OverflowError,
1627 1635 "bitset size (%d) > capacity (%d)",
1628 1636 revcount, capacity);
1629 1637 goto bail;
1630 1638 }
1631 1639 revs[revcount++] = (int)val;
1632 1640 duplicate:;
1633 1641 }
1634 1642
1635 1643 if (revcount == 0) {
1636 1644 ret = PyList_New(0);
1637 1645 goto done;
1638 1646 }
1639 1647 if (revcount == 1) {
1640 1648 PyObject *obj;
1641 1649 ret = PyList_New(1);
1642 1650 if (ret == NULL)
1643 1651 goto bail;
1644 1652 obj = PyInt_FromLong(revs[0]);
1645 1653 if (obj == NULL)
1646 1654 goto bail;
1647 1655 PyList_SET_ITEM(ret, 0, obj);
1648 1656 goto done;
1649 1657 }
1650 1658
1651 1659 ret = find_gca_candidates(self, revs, revcount);
1652 1660 if (ret == NULL)
1653 1661 goto bail;
1654 1662
1655 1663 done:
1656 1664 PyMem_Free(revs);
1657 1665 return ret;
1658 1666
1659 1667 bail:
1660 1668 PyMem_Free(revs);
1661 1669 Py_XDECREF(ret);
1662 1670 return NULL;
1663 1671 }
1664 1672
1665 1673 /*
1666 1674 * Given a (possibly overlapping) set of revs, return the greatest
1667 1675 * common ancestors: those with the longest path to the root.
1668 1676 */
1669 1677 static PyObject *index_ancestors(indexObject *self, PyObject *args)
1670 1678 {
1671 1679 PyObject *ret;
1672 1680 PyObject *gca = index_commonancestorsheads(self, args);
1673 1681 if (gca == NULL)
1674 1682 return NULL;
1675 1683
1676 1684 if (PyList_GET_SIZE(gca) <= 1) {
1677 1685 return gca;
1678 1686 }
1679 1687
1680 1688 ret = find_deepest(self, gca);
1681 1689 Py_DECREF(gca);
1682 1690 return ret;
1683 1691 }
1684 1692
1685 1693 /*
1686 1694 * Invalidate any trie entries introduced by added revs.
1687 1695 */
1688 1696 static void nt_invalidate_added(indexObject *self, Py_ssize_t start)
1689 1697 {
1690 1698 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
1691 1699
1692 1700 for (i = start; i < len; i++) {
1693 1701 PyObject *tuple = PyList_GET_ITEM(self->added, i);
1694 1702 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
1695 1703
1696 1704 nt_insert(self, PyBytes_AS_STRING(node), -1);
1697 1705 }
1698 1706
1699 1707 if (start == 0)
1700 1708 Py_CLEAR(self->added);
1701 1709 }
1702 1710
1703 1711 /*
1704 1712 * Delete a numeric range of revs, which must be at the end of the
1705 1713 * range, but exclude the sentinel nullid entry.
1706 1714 */
1707 1715 static int index_slice_del(indexObject *self, PyObject *item)
1708 1716 {
1709 1717 Py_ssize_t start, stop, step, slicelength;
1710 1718 Py_ssize_t length = index_length(self);
1711 1719 int ret = 0;
1712 1720
1713 1721 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
1714 1722 #ifdef IS_PY3K
1715 1723 if (PySlice_GetIndicesEx(item, length,
1716 1724 #else
1717 1725 if (PySlice_GetIndicesEx((PySliceObject*)item, length,
1718 1726 #endif
1719 1727 &start, &stop, &step, &slicelength) < 0)
1720 1728 return -1;
1721 1729
1722 1730 if (slicelength <= 0)
1723 1731 return 0;
1724 1732
1725 1733 if ((step < 0 && start < stop) || (step > 0 && start > stop))
1726 1734 stop = start;
1727 1735
1728 1736 if (step < 0) {
1729 1737 stop = start + 1;
1730 1738 start = stop + step*(slicelength - 1) - 1;
1731 1739 step = -step;
1732 1740 }
1733 1741
1734 1742 if (step != 1) {
1735 1743 PyErr_SetString(PyExc_ValueError,
1736 1744 "revlog index delete requires step size of 1");
1737 1745 return -1;
1738 1746 }
1739 1747
1740 1748 if (stop != length - 1) {
1741 1749 PyErr_SetString(PyExc_IndexError,
1742 1750 "revlog index deletion indices are invalid");
1743 1751 return -1;
1744 1752 }
1745 1753
1746 1754 if (start < self->length - 1) {
1747 1755 if (self->nt) {
1748 1756 Py_ssize_t i;
1749 1757
1750 1758 for (i = start + 1; i < self->length - 1; i++) {
1751 1759 const char *node = index_node(self, i);
1752 1760
1753 1761 if (node)
1754 1762 nt_insert(self, node, -1);
1755 1763 }
1756 1764 if (self->added)
1757 1765 nt_invalidate_added(self, 0);
1758 1766 if (self->ntrev > start)
1759 1767 self->ntrev = (int)start;
1760 1768 }
1761 1769 self->length = start + 1;
1762 1770 if (start < self->raw_length) {
1763 1771 if (self->cache) {
1764 1772 Py_ssize_t i;
1765 1773 for (i = start; i < self->raw_length; i++)
1766 1774 Py_CLEAR(self->cache[i]);
1767 1775 }
1768 1776 self->raw_length = start;
1769 1777 }
1770 1778 goto done;
1771 1779 }
1772 1780
1773 1781 if (self->nt) {
1774 1782 nt_invalidate_added(self, start - self->length + 1);
1775 1783 if (self->ntrev > start)
1776 1784 self->ntrev = (int)start;
1777 1785 }
1778 1786 if (self->added)
1779 1787 ret = PyList_SetSlice(self->added, start - self->length + 1,
1780 1788 PyList_GET_SIZE(self->added), NULL);
1781 1789 done:
1782 1790 Py_CLEAR(self->headrevs);
1783 1791 return ret;
1784 1792 }
1785 1793
1786 1794 /*
1787 1795 * Supported ops:
1788 1796 *
1789 1797 * slice deletion
1790 1798 * string assignment (extend node->rev mapping)
1791 1799 * string deletion (shrink node->rev mapping)
1792 1800 */
1793 1801 static int index_assign_subscript(indexObject *self, PyObject *item,
1794 1802 PyObject *value)
1795 1803 {
1796 1804 char *node;
1797 1805 Py_ssize_t nodelen;
1798 1806 long rev;
1799 1807
1800 1808 if (PySlice_Check(item) && value == NULL)
1801 1809 return index_slice_del(self, item);
1802 1810
1803 1811 if (node_check(item, &node, &nodelen) == -1)
1804 1812 return -1;
1805 1813
1806 1814 if (value == NULL)
1807 1815 return self->nt ? nt_insert(self, node, -1) : 0;
1808 1816 rev = PyInt_AsLong(value);
1809 1817 if (rev > INT_MAX || rev < 0) {
1810 1818 if (!PyErr_Occurred())
1811 1819 PyErr_SetString(PyExc_ValueError, "rev out of range");
1812 1820 return -1;
1813 1821 }
1814 1822
1815 1823 if (nt_init(self) == -1)
1816 1824 return -1;
1817 1825 return nt_insert(self, node, (int)rev);
1818 1826 }
1819 1827
1820 1828 /*
1821 1829 * Find all RevlogNG entries in an index that has inline data. Update
1822 1830 * the optional "offsets" table with those entries.
1823 1831 */
1824 1832 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
1825 1833 {
1826 1834 const char *data = (const char *)self->buf.buf;
1827 1835 Py_ssize_t pos = 0;
1828 1836 Py_ssize_t end = self->buf.len;
1829 1837 long incr = v1_hdrsize;
1830 1838 Py_ssize_t len = 0;
1831 1839
1832 1840 while (pos + v1_hdrsize <= end && pos >= 0) {
1833 1841 uint32_t comp_len;
1834 1842 /* 3rd element of header is length of compressed inline data */
1835 1843 comp_len = getbe32(data + pos + 8);
1836 1844 incr = v1_hdrsize + comp_len;
1837 1845 if (offsets)
1838 1846 offsets[len] = data + pos;
1839 1847 len++;
1840 1848 pos += incr;
1841 1849 }
1842 1850
1843 1851 if (pos != end) {
1844 1852 if (!PyErr_Occurred())
1845 1853 PyErr_SetString(PyExc_ValueError, "corrupt index file");
1846 1854 return -1;
1847 1855 }
1848 1856
1849 1857 return len;
1850 1858 }
1851 1859
1852 1860 static int index_init(indexObject *self, PyObject *args)
1853 1861 {
1854 1862 PyObject *data_obj, *inlined_obj;
1855 1863 Py_ssize_t size;
1856 1864
1857 1865 /* Initialize before argument-checking to avoid index_dealloc() crash. */
1858 1866 self->raw_length = 0;
1859 1867 self->added = NULL;
1860 1868 self->cache = NULL;
1861 1869 self->data = NULL;
1862 1870 memset(&self->buf, 0, sizeof(self->buf));
1863 1871 self->headrevs = NULL;
1864 1872 self->filteredrevs = Py_None;
1865 1873 Py_INCREF(Py_None);
1866 1874 self->nt = NULL;
1867 1875 self->offsets = NULL;
1868 1876
1869 1877 if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
1870 1878 return -1;
1871 1879 if (!PyObject_CheckBuffer(data_obj)) {
1872 1880 PyErr_SetString(PyExc_TypeError,
1873 1881 "data does not support buffer interface");
1874 1882 return -1;
1875 1883 }
1876 1884
1877 1885 if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
1878 1886 return -1;
1879 1887 size = self->buf.len;
1880 1888
1881 1889 self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
1882 1890 self->data = data_obj;
1883 1891
1884 1892 self->ntlength = self->ntcapacity = 0;
1885 1893 self->ntdepth = self->ntsplits = 0;
1886 1894 self->ntlookups = self->ntmisses = 0;
1887 1895 self->ntrev = -1;
1888 1896 Py_INCREF(self->data);
1889 1897
1890 1898 if (self->inlined) {
1891 1899 Py_ssize_t len = inline_scan(self, NULL);
1892 1900 if (len == -1)
1893 1901 goto bail;
1894 1902 self->raw_length = len;
1895 1903 self->length = len + 1;
1896 1904 } else {
1897 1905 if (size % v1_hdrsize) {
1898 1906 PyErr_SetString(PyExc_ValueError, "corrupt index file");
1899 1907 goto bail;
1900 1908 }
1901 1909 self->raw_length = size / v1_hdrsize;
1902 1910 self->length = self->raw_length + 1;
1903 1911 }
1904 1912
1905 1913 return 0;
1906 1914 bail:
1907 1915 return -1;
1908 1916 }
1909 1917
1910 1918 static PyObject *index_nodemap(indexObject *self)
1911 1919 {
1912 1920 Py_INCREF(self);
1913 1921 return (PyObject *)self;
1914 1922 }
1915 1923
1916 1924 static void index_dealloc(indexObject *self)
1917 1925 {
1918 1926 _index_clearcaches(self);
1919 1927 Py_XDECREF(self->filteredrevs);
1920 1928 if (self->buf.buf) {
1921 1929 PyBuffer_Release(&self->buf);
1922 1930 memset(&self->buf, 0, sizeof(self->buf));
1923 1931 }
1924 1932 Py_XDECREF(self->data);
1925 1933 Py_XDECREF(self->added);
1926 1934 PyObject_Del(self);
1927 1935 }
1928 1936
1929 1937 static PySequenceMethods index_sequence_methods = {
1930 1938 (lenfunc)index_length, /* sq_length */
1931 1939 0, /* sq_concat */
1932 1940 0, /* sq_repeat */
1933 1941 (ssizeargfunc)index_get, /* sq_item */
1934 1942 0, /* sq_slice */
1935 1943 0, /* sq_ass_item */
1936 1944 0, /* sq_ass_slice */
1937 1945 (objobjproc)index_contains, /* sq_contains */
1938 1946 };
1939 1947
1940 1948 static PyMappingMethods index_mapping_methods = {
1941 1949 (lenfunc)index_length, /* mp_length */
1942 1950 (binaryfunc)index_getitem, /* mp_subscript */
1943 1951 (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
1944 1952 };
1945 1953
1946 1954 static PyMethodDef index_methods[] = {
1947 1955 {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
1948 1956 "return the gca set of the given revs"},
1949 1957 {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
1950 1958 METH_VARARGS,
1951 1959 "return the heads of the common ancestors of the given revs"},
1952 1960 {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
1953 1961 "clear the index caches"},
1954 1962 {"get", (PyCFunction)index_m_get, METH_VARARGS,
1955 1963 "get an index entry"},
1956 1964 {"computephasesmapsets", (PyCFunction)compute_phases_map_sets,
1957 1965 METH_VARARGS, "compute phases"},
1958 1966 {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
1959 1967 "reachableroots"},
1960 1968 {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
1961 1969 "get head revisions"}, /* Can do filtering since 3.2 */
1962 1970 {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
1963 1971 "get filtered head revisions"}, /* Can always do filtering */
1964 1972 {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
1965 1973 "determine revisions with deltas to reconstruct fulltext"},
1966 1974 {"insert", (PyCFunction)index_insert, METH_VARARGS,
1967 1975 "insert an index entry"},
1968 1976 {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
1969 1977 "match a potentially ambiguous node ID"},
1970 1978 {"stats", (PyCFunction)index_stats, METH_NOARGS,
1971 1979 "stats for the index"},
1972 1980 {NULL} /* Sentinel */
1973 1981 };
1974 1982
1975 1983 static PyGetSetDef index_getset[] = {
1976 1984 {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
1977 1985 {NULL} /* Sentinel */
1978 1986 };
1979 1987
1980 1988 static PyTypeObject indexType = {
1981 1989 PyVarObject_HEAD_INIT(NULL, 0)
1982 1990 "parsers.index", /* tp_name */
1983 1991 sizeof(indexObject), /* tp_basicsize */
1984 1992 0, /* tp_itemsize */
1985 1993 (destructor)index_dealloc, /* tp_dealloc */
1986 1994 0, /* tp_print */
1987 1995 0, /* tp_getattr */
1988 1996 0, /* tp_setattr */
1989 1997 0, /* tp_compare */
1990 1998 0, /* tp_repr */
1991 1999 0, /* tp_as_number */
1992 2000 &index_sequence_methods, /* tp_as_sequence */
1993 2001 &index_mapping_methods, /* tp_as_mapping */
1994 2002 0, /* tp_hash */
1995 2003 0, /* tp_call */
1996 2004 0, /* tp_str */
1997 2005 0, /* tp_getattro */
1998 2006 0, /* tp_setattro */
1999 2007 0, /* tp_as_buffer */
2000 2008 Py_TPFLAGS_DEFAULT, /* tp_flags */
2001 2009 "revlog index", /* tp_doc */
2002 2010 0, /* tp_traverse */
2003 2011 0, /* tp_clear */
2004 2012 0, /* tp_richcompare */
2005 2013 0, /* tp_weaklistoffset */
2006 2014 0, /* tp_iter */
2007 2015 0, /* tp_iternext */
2008 2016 index_methods, /* tp_methods */
2009 2017 0, /* tp_members */
2010 2018 index_getset, /* tp_getset */
2011 2019 0, /* tp_base */
2012 2020 0, /* tp_dict */
2013 2021 0, /* tp_descr_get */
2014 2022 0, /* tp_descr_set */
2015 2023 0, /* tp_dictoffset */
2016 2024 (initproc)index_init, /* tp_init */
2017 2025 0, /* tp_alloc */
2018 2026 };
2019 2027
2020 2028 /*
2021 2029 * returns a tuple of the form (index, index, cache) with elements as
2022 2030 * follows:
2023 2031 *
2024 2032 * index: an index object that lazily parses RevlogNG records
2025 2033 * cache: if data is inlined, a tuple (0, index_file_content), else None
2026 2034 * index_file_content could be a string, or a buffer
2027 2035 *
2028 2036 * added complications are for backwards compatibility
2029 2037 */
2030 2038 PyObject *parse_index2(PyObject *self, PyObject *args)
2031 2039 {
2032 2040 PyObject *tuple = NULL, *cache = NULL;
2033 2041 indexObject *idx;
2034 2042 int ret;
2035 2043
2036 2044 idx = PyObject_New(indexObject, &indexType);
2037 2045 if (idx == NULL)
2038 2046 goto bail;
2039 2047
2040 2048 ret = index_init(idx, args);
2041 2049 if (ret == -1)
2042 2050 goto bail;
2043 2051
2044 2052 if (idx->inlined) {
2045 2053 cache = Py_BuildValue("iO", 0, idx->data);
2046 2054 if (cache == NULL)
2047 2055 goto bail;
2048 2056 } else {
2049 2057 cache = Py_None;
2050 2058 Py_INCREF(cache);
2051 2059 }
2052 2060
2053 2061 tuple = Py_BuildValue("NN", idx, cache);
2054 2062 if (!tuple)
2055 2063 goto bail;
2056 2064 return tuple;
2057 2065
2058 2066 bail:
2059 2067 Py_XDECREF(idx);
2060 2068 Py_XDECREF(cache);
2061 2069 Py_XDECREF(tuple);
2062 2070 return NULL;
2063 2071 }
2064 2072
2065 2073 void revlog_module_init(PyObject *mod)
2066 2074 {
2067 2075 indexType.tp_new = PyType_GenericNew;
2068 2076 if (PyType_Ready(&indexType) < 0)
2069 2077 return;
2070 2078 Py_INCREF(&indexType);
2071 2079 PyModule_AddObject(mod, "index", (PyObject *)&indexType);
2072 2080
2073 2081 nullentry = Py_BuildValue("iiiiiiis#", 0, 0, 0,
2074 2082 -1, -1, -1, -1, nullid, 20);
2075 2083 if (nullentry)
2076 2084 PyObject_GC_UnTrack(nullentry);
2077 2085 }