##// END OF EJS Templates
revlog: extract function for getting node from known-to-exist rev...
Martin von Zweigbergk -
r37877:a91f31a1 default
parent child Browse files
Show More
@@ -1,2089 +1,2101
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #include <Python.h>
11 11 #include <assert.h>
12 12 #include <ctype.h>
13 13 #include <stddef.h>
14 14 #include <string.h>
15 15
16 16 #include "bitmanipulation.h"
17 17 #include "charencode.h"
18 18 #include "util.h"
19 19
20 20 #ifdef IS_PY3K
21 21 /* The mapping of Python types is meant to be temporary to get Python
22 22 * 3 to compile. We should remove this once Python 3 support is fully
23 23 * supported and proper types are used in the extensions themselves. */
24 24 #define PyInt_Check PyLong_Check
25 25 #define PyInt_FromLong PyLong_FromLong
26 26 #define PyInt_FromSsize_t PyLong_FromSsize_t
27 27 #define PyInt_AS_LONG PyLong_AS_LONG
28 28 #define PyInt_AsLong PyLong_AsLong
29 29 #endif
30 30
/*
 * A base-16 trie for fast node->rev mapping.
 *
 * Positive value is index of the next node in the trie
 * Negative value is a leaf: -(rev + 1)
 * Zero is empty
 */
typedef struct {
	/* one slot per nybble (hex digit) of a node hash */
	int children[16];
} nodetree;
41 41
/*
 * This class has two behaviors.
 *
 * When used in a list-like way (with integer keys), we decode an
 * entry in a RevlogNG index file on demand. Our last entry is a
 * sentinel, always a nullid. We have limited support for
 * integer-keyed insert and delete, only at elements right before the
 * sentinel.
 *
 * With string keys, we lazily perform a reverse mapping from node to
 * rev, using a base-16 trie.
 */
typedef struct {
	PyObject_HEAD
	/* Type-specific fields go here. */
	PyObject *data;        /* raw bytes of index */
	Py_buffer buf;         /* buffer of data */
	PyObject **cache;      /* cached tuples */
	const char **offsets;  /* populated on demand */
	Py_ssize_t raw_length; /* original number of elements */
	Py_ssize_t length;     /* current number of elements */
	PyObject *added;       /* populated on demand */
	PyObject *headrevs;    /* cache, invalidated on changes */
	PyObject *filteredrevs;/* filtered revs set */
	nodetree *nt;          /* base-16 trie */
	unsigned ntlength;     /* # nodes in use */
	unsigned ntcapacity;   /* # nodes allocated */
	int ntdepth;           /* maximum depth of tree */
	int ntsplits;          /* # splits performed */
	int ntrev;             /* last rev scanned */
	int ntlookups;         /* # lookups */
	int ntmisses;          /* # lookups that miss the cache */
	int inlined;           /* nonzero if records sit at irregular offsets
	                          (see index_deref / inline_scan) */
} indexObject;
76 76
77 77 static Py_ssize_t index_length(const indexObject *self)
78 78 {
79 79 if (self->added == NULL)
80 80 return self->length;
81 81 return self->length + PyList_GET_SIZE(self->added);
82 82 }
83 83
/* the sentinel entry returned for the nullid rev — presumably
 * initialized during module setup (not visible in this chunk) */
static PyObject *nullentry;
/* the all-zero null node hash */
static const char nullid[20];

static Py_ssize_t inline_scan(indexObject *self, const char **offsets);

/* 'K' (unsigned long long) vs 'k' (unsigned long) for the 64-bit
 * offset/flags field, chosen by the platform's long width;
 * 's#'/'y#' is the 20-byte node id */
#if LONG_MAX == 0x7fffffffL
static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
#else
static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
#endif

/* A RevlogNG v1 index entry is 64 bytes long. */
static const long v1_hdrsize = 64;
97 97
/*
 * Return a pointer to the beginning of a RevlogNG record.
 */
static const char *index_deref(indexObject *self, Py_ssize_t pos)
{
	if (self->inlined && pos > 0) {
		/* Inline data: record offsets are irregular, so lazily build
		 * (once) a rev -> record-start table. */
		if (self->offsets == NULL) {
			self->offsets = PyMem_Malloc(self->raw_length *
					             sizeof(*self->offsets));
			if (self->offsets == NULL)
				return (const char *)PyErr_NoMemory();
			/* NOTE(review): inline_scan's return value is not
			 * checked here — confirm it cannot fail at this
			 * point. */
			inline_scan(self, self->offsets);
		}
		return self->offsets[pos];
	}

	/* Non-inline data: fixed-size records, simple arithmetic. */
	return (const char *)(self->buf.buf) + pos * v1_hdrsize;
}
116 116
/*
 * Read the two parent revisions of 'rev' into ps[0] and ps[1].
 *
 * Revisions at or past self->length - 1 are in-memory additions and
 * come from the 'added' list (tuple slots 5 and 6); earlier revisions
 * are decoded from the raw index.  Returns 0 on success, -1 with
 * ValueError set when a parent exceeds maxrev.
 */
static inline int index_get_parents(indexObject *self, Py_ssize_t rev,
				    int *ps, int maxrev)
{
	if (rev >= self->length - 1) {
		PyObject *tuple = PyList_GET_ITEM(self->added,
						  rev - self->length + 1);
		ps[0] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 5));
		ps[1] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 6));
	} else {
		/* NOTE(review): index_deref can return NULL on OOM; data is
		 * used unchecked here — confirm callers guard against it. */
		const char *data = index_deref(self, rev);
		ps[0] = getbe32(data + 24);
		ps[1] = getbe32(data + 28);
	}
	/* If index file is corrupted, ps[] may point to invalid revisions. So
	 * there is a risk of buffer overflow to trust them unconditionally. */
	if (ps[0] > maxrev || ps[1] > maxrev) {
		PyErr_SetString(PyExc_ValueError, "parent out of range");
		return -1;
	}
	return 0;
}
138 138
139 139
/*
 * RevlogNG format (all in big endian, data may be inlined):
 *    6 bytes: offset
 *    2 bytes: flags
 *    4 bytes: compressed length
 *    4 bytes: uncompressed length
 *    4 bytes: base revision
 *    4 bytes: link revision
 *    4 bytes: parent 1 revision
 *    4 bytes: parent 2 revision
 *   32 bytes: nodeid (only 20 bytes used)
 *
 * index_get decodes the entry at 'pos' into an 8-tuple, serving the
 * sentinel, in-memory additions, and a lazily built tuple cache as
 * appropriate.
 */
static PyObject *index_get(indexObject *self, Py_ssize_t pos)
{
	uint64_t offset_flags;
	int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
	const char *c_node_id;
	const char *data;
	Py_ssize_t length = index_length(self);
	PyObject *entry;

	/* support Python-style negative indexing */
	if (pos < 0)
		pos += length;

	if (pos < 0 || pos >= length) {
		PyErr_SetString(PyExc_IndexError, "revlog index out of range");
		return NULL;
	}

	/* the last entry is always the nullid sentinel */
	if (pos == length - 1) {
		Py_INCREF(nullentry);
		return nullentry;
	}

	/* entries beyond the on-disk data live in the 'added' list */
	if (pos >= self->length - 1) {
		PyObject *obj;
		obj = PyList_GET_ITEM(self->added, pos - self->length + 1);
		Py_INCREF(obj);
		return obj;
	}

	/* serve from (or lazily allocate) the decoded-tuple cache */
	if (self->cache) {
		if (self->cache[pos]) {
			Py_INCREF(self->cache[pos]);
			return self->cache[pos];
		}
	} else {
		self->cache = calloc(self->raw_length, sizeof(PyObject *));
		if (self->cache == NULL)
			return PyErr_NoMemory();
	}

	data = index_deref(self, pos);
	if (data == NULL)
		return NULL;

	offset_flags = getbe32(data + 4);
	if (pos == 0) /* mask out version number for the first entry */
		offset_flags &= 0xFFFF;
	else {
		uint32_t offset_high = getbe32(data);
		offset_flags |= ((uint64_t)offset_high) << 32;
	}

	comp_len = getbe32(data + 8);
	uncomp_len = getbe32(data + 12);
	base_rev = getbe32(data + 16);
	link_rev = getbe32(data + 20);
	parent_1 = getbe32(data + 24);
	parent_2 = getbe32(data + 28);
	c_node_id = data + 32;

	entry = Py_BuildValue(tuple_format, offset_flags, comp_len,
			      uncomp_len, base_rev, link_rev,
			      parent_1, parent_2, c_node_id, 20);

	if (entry) {
		PyObject_GC_UnTrack(entry);
		/* extra reference: one held by the cache, one returned */
		Py_INCREF(entry);
	}

	self->cache[pos] = entry;

	return entry;
}
225 225
/*
 * Return the 20-byte SHA of the node corresponding to the given rev.
 * Returns NULL if the rev is out of range or the node cannot be read.
 */
static const char *index_node(indexObject *self, Py_ssize_t pos)
{
	Py_ssize_t length = index_length(self);
	const char *data;

	/* the sentinel entry and the INT_MAX trie marker both mean nullid */
	if (pos == length - 1 || pos == INT_MAX)
		return nullid;

	if (pos >= length)
		return NULL;

	if (pos >= self->length - 1) {
		/* in-memory entry: tuple slot 7 holds the node bytes */
		PyObject *tuple, *str;
		tuple = PyList_GET_ITEM(self->added, pos - self->length + 1);
		str = PyTuple_GetItem(tuple, 7);
		return str ? PyBytes_AS_STRING(str) : NULL;
	}

	data = index_deref(self, pos);
	return data ? data + 32 : NULL;
}
250 250
251 /*
252 * Return the 20-byte SHA of the node corresponding to the given rev. The
253 * rev is assumed to be existing. If not, an exception is set.
254 */
255 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
256 {
257 const char *node = index_node(self, pos);
258 if (node == NULL) {
259 PyErr_Format(PyExc_IndexError, "could not access rev %d",
260 (int)pos);
261 }
262 return node;
263 }
264
251 265 static int nt_insert(indexObject *self, const char *node, int rev);
252 266
253 267 static int node_check(PyObject *obj, char **node, Py_ssize_t *nodelen)
254 268 {
255 269 if (PyBytes_AsStringAndSize(obj, node, nodelen) == -1)
256 270 return -1;
257 271 if (*nodelen == 20)
258 272 return 0;
259 273 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
260 274 return -1;
261 275 }
262 276
/*
 * insert(index, entry): append one 8-tuple entry, supported only at
 * the position just before the nullid sentinel (index -1).  Appended
 * entries accumulate in self->added; the node trie, if built, is
 * updated eagerly, and the cached head list is invalidated.
 */
static PyObject *index_insert(indexObject *self, PyObject *args)
{
	PyObject *obj;
	char *node;
	int index;
	Py_ssize_t len, nodelen;

	if (!PyArg_ParseTuple(args, "iO", &index, &obj))
		return NULL;

	if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
		PyErr_SetString(PyExc_TypeError, "8-tuple required");
		return NULL;
	}

	/* tuple slot 7 is the binary node hash */
	if (node_check(PyTuple_GET_ITEM(obj, 7), &node, &nodelen) == -1)
		return NULL;

	len = index_length(self);

	if (index < 0)
		index += len;

	if (index != len - 1) {
		PyErr_SetString(PyExc_IndexError,
				"insert only supported at index -1");
		return NULL;
	}

	if (self->added == NULL) {
		self->added = PyList_New(0);
		if (self->added == NULL)
			return NULL;
	}

	if (PyList_Append(self->added, obj) == -1)
		return NULL;

	/* NOTE(review): nt_insert's return value is ignored — confirm a
	 * failed trie insert is tolerable here. */
	if (self->nt)
		nt_insert(self, node, index);

	Py_CLEAR(self->headrevs);
	Py_RETURN_NONE;
}
307 321
/*
 * Free every lazily built cache: the decoded-tuple cache, the inline
 * offset table, the node trie, and the cached head list.  Trie
 * statistics are left alone (index_clearcaches resets those).
 */
static void _index_clearcaches(indexObject *self)
{
	if (self->cache) {
		Py_ssize_t i;

		for (i = 0; i < self->raw_length; i++)
			Py_CLEAR(self->cache[i]);
		free(self->cache);
		self->cache = NULL;
	}
	if (self->offsets) {
		PyMem_Free(self->offsets);
		self->offsets = NULL;
	}
	if (self->nt) {
		free(self->nt);
		self->nt = NULL;
	}
	Py_CLEAR(self->headrevs);
}
328 342
329 343 static PyObject *index_clearcaches(indexObject *self)
330 344 {
331 345 _index_clearcaches(self);
332 346 self->ntlength = self->ntcapacity = 0;
333 347 self->ntdepth = self->ntsplits = 0;
334 348 self->ntrev = -1;
335 349 self->ntlookups = self->ntmisses = 0;
336 350 Py_RETURN_NONE;
337 351 }
338 352
/*
 * stats(): return a dict of implementation statistics — revision
 * counts and node-trie size/lookup/miss counters — for debugging.
 */
static PyObject *index_stats(indexObject *self)
{
	PyObject *obj = PyDict_New();
	PyObject *t = NULL;

	if (obj == NULL)
		return NULL;

/* store self->__n under key __d, jumping to bail on any failure */
#define istat(__n, __d) \
	do { \
		t = PyInt_FromSsize_t(self->__n); \
		if (!t) \
			goto bail; \
		if (PyDict_SetItemString(obj, __d, t) == -1) \
			goto bail; \
		Py_DECREF(t); \
	} while (0)

	if (self->added) {
		Py_ssize_t len = PyList_GET_SIZE(self->added);
		t = PyInt_FromSsize_t(len);
		if (!t)
			goto bail;
		if (PyDict_SetItemString(obj, "index entries added", t) == -1)
			goto bail;
		Py_DECREF(t);
	}

	if (self->raw_length != self->length - 1)
		istat(raw_length, "revs on disk");
	istat(length, "revs in memory");
	istat(ntcapacity, "node trie capacity");
	istat(ntdepth, "node trie depth");
	istat(ntlength, "node trie count");
	istat(ntlookups, "node trie lookups");
	istat(ntmisses, "node trie misses");
	istat(ntrev, "node trie last rev scanned");
	istat(ntsplits, "node trie splits");

#undef istat

	return obj;

bail:
	/* if PyInt_FromSsize_t failed, t is NULL and Py_XDECREF is a no-op;
	 * if PyDict_SetItemString failed, t still holds its reference */
	Py_XDECREF(obj);
	Py_XDECREF(t);
	return NULL;
}
387 401
388 402 /*
389 403 * When we cache a list, we want to be sure the caller can't mutate
390 404 * the cached copy.
391 405 */
392 406 static PyObject *list_copy(PyObject *list)
393 407 {
394 408 Py_ssize_t len = PyList_GET_SIZE(list);
395 409 PyObject *newlist = PyList_New(len);
396 410 Py_ssize_t i;
397 411
398 412 if (newlist == NULL)
399 413 return NULL;
400 414
401 415 for (i = 0; i < len; i++) {
402 416 PyObject *obj = PyList_GET_ITEM(list, i);
403 417 Py_INCREF(obj);
404 418 PyList_SET_ITEM(newlist, i, obj);
405 419 }
406 420
407 421 return newlist;
408 422 }
409 423
410 424 static int check_filter(PyObject *filter, Py_ssize_t arg)
411 425 {
412 426 if (filter) {
413 427 PyObject *arglist, *result;
414 428 int isfiltered;
415 429
416 430 arglist = Py_BuildValue("(n)", arg);
417 431 if (!arglist) {
418 432 return -1;
419 433 }
420 434
421 435 result = PyEval_CallObject(filter, arglist);
422 436 Py_DECREF(arglist);
423 437 if (!result) {
424 438 return -1;
425 439 }
426 440
427 441 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
428 442 * same as this function, so we can just return it directly.*/
429 443 isfiltered = PyObject_IsTrue(result);
430 444 Py_DECREF(result);
431 445 return isfiltered;
432 446 } else {
433 447 return 0;
434 448 }
435 449 }
436 450
/*
 * Mark every rev listed in 'list' with 'marker' in the phases array
 * and return the smallest rev seen, or index_length(self) + 1 when the
 * list is empty.  Returns -2 if an iterator cannot be obtained.
 *
 * NOTE(review): items are read with PyInt_AS_LONG without a type
 * check, and phases[] is indexed without a bounds check — callers
 * presumably guarantee in-range integer revs; confirm.
 */
static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
				    Py_ssize_t marker, char *phases)
{
	PyObject *iter = NULL;
	PyObject *iter_item = NULL;
	Py_ssize_t min_idx = index_length(self) + 1;
	long iter_item_long;

	if (PyList_GET_SIZE(list) != 0) {
		iter = PyObject_GetIter(list);
		if (iter == NULL)
			return -2;
		while ((iter_item = PyIter_Next(iter))) {
			iter_item_long = PyInt_AS_LONG(iter_item);
			Py_DECREF(iter_item);
			if (iter_item_long < min_idx)
				min_idx = iter_item_long;
			phases[iter_item_long] = marker;
		}
		Py_DECREF(iter);
	}

	return min_idx;
}
461 475
462 476 static inline void set_phase_from_parents(char *phases, int parent_1,
463 477 int parent_2, Py_ssize_t i)
464 478 {
465 479 if (parent_1 >= 0 && phases[parent_1] > phases[i])
466 480 phases[i] = phases[parent_1];
467 481 if (parent_2 >= 0 && phases[parent_2] > phases[i])
468 482 phases[i] = phases[parent_2];
469 483 }
470 484
/*
 * reachableroots2(minroot, heads, roots, includepath) -> list of revs
 *
 * Walk ancestors of 'heads' (never descending below 'minroot') and
 * collect every member of 'roots' that is reached.  When includepath
 * is True, additionally collect every rev lying on a path between a
 * reachable root and a head.
 */
static PyObject *reachableroots2(indexObject *self, PyObject *args)
{

	/* Input */
	long minroot;
	PyObject *includepatharg = NULL;
	int includepath = 0;
	/* heads and roots are lists */
	PyObject *heads = NULL;
	PyObject *roots = NULL;
	PyObject *reachable = NULL;

	PyObject *val;
	Py_ssize_t len = index_length(self) - 1;
	long revnum;
	Py_ssize_t k;
	Py_ssize_t i;
	Py_ssize_t l;
	int r;
	int parents[2];

	/* Internal data structure:
	 * tovisit: array of length len+1 (all revs + nullrev), filled upto lentovisit
	 * revstates: array of length len+1 (all revs + nullrev) */
	int *tovisit = NULL;
	long lentovisit = 0;
	enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
	char *revstates = NULL;

	/* Get arguments */
	if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
			      &PyList_Type, &roots,
			      &PyBool_Type, &includepatharg))
		goto bail;

	if (includepatharg == Py_True)
		includepath = 1;

	/* Initialize return set */
	reachable = PyList_New(0);
	if (reachable == NULL)
		goto bail;

	/* Initialize internal datastructures */
	tovisit = (int *)malloc((len + 1) * sizeof(int));
	if (tovisit == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	revstates = (char *)calloc(len + 1, 1);
	if (revstates == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* revstates is indexed by rev + 1 so nullrev (-1) maps to slot 0 */
	l = PyList_GET_SIZE(roots);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		/* If root is out of range, e.g. wdir(), it must be unreachable
		 * from heads. So we can just ignore it. */
		if (revnum + 1 < 0 || revnum + 1 >= len + 1)
			continue;
		revstates[revnum + 1] |= RS_ROOT;
	}

	/* Populate tovisit with all the heads */
	l = PyList_GET_SIZE(heads);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
			PyErr_SetString(PyExc_IndexError, "head out of range");
			goto bail;
		}
		if (!(revstates[revnum + 1] & RS_SEEN)) {
			tovisit[lentovisit++] = (int)revnum;
			revstates[revnum + 1] |= RS_SEEN;
		}
	}

	/* Visit the tovisit list and find the reachable roots */
	k = 0;
	while (k < lentovisit) {
		/* Add the node to reachable if it is a root*/
		revnum = tovisit[k++];
		if (revstates[revnum + 1] & RS_ROOT) {
			revstates[revnum + 1] |= RS_REACHABLE;
			val = PyInt_FromLong(revnum);
			if (val == NULL)
				goto bail;
			r = PyList_Append(reachable, val);
			Py_DECREF(val);
			if (r < 0)
				goto bail;
			if (includepath == 0)
				continue;
		}

		/* Add its parents to the list of nodes to visit */
		if (revnum == -1)
			continue;
		r = index_get_parents(self, revnum, parents, (int)len - 1);
		if (r < 0)
			goto bail;
		for (i = 0; i < 2; i++) {
			if (!(revstates[parents[i] + 1] & RS_SEEN)
			    && parents[i] >= minroot) {
				tovisit[lentovisit++] = parents[i];
				revstates[parents[i] + 1] |= RS_SEEN;
			}
		}
	}

	/* Find all the nodes in between the roots we found and the heads
	 * and add them to the reachable set */
	if (includepath == 1) {
		long minidx = minroot;
		if (minidx < 0)
			minidx = 0;
		for (i = minidx; i < len; i++) {
			if (!(revstates[i + 1] & RS_SEEN))
				continue;
			r = index_get_parents(self, i, parents, (int)len - 1);
			/* Corrupted index file, error is set from
			 * index_get_parents */
			if (r < 0)
				goto bail;
			if (((revstates[parents[0] + 1] |
			      revstates[parents[1] + 1]) & RS_REACHABLE)
			    && !(revstates[i + 1] & RS_REACHABLE)) {
				revstates[i + 1] |= RS_REACHABLE;
				val = PyInt_FromLong(i);
				if (val == NULL)
					goto bail;
				r = PyList_Append(reachable, val);
				Py_DECREF(val);
				if (r < 0)
					goto bail;
			}
		}
	}

	free(revstates);
	free(tovisit);
	return reachable;
bail:
	Py_XDECREF(reachable);
	free(revstates);
	free(tovisit);
	return NULL;
}
626 640
/*
 * computephasesmapsets(roots) -> (len, [None, set, set, ...])
 *
 * 'roots' is a list of per-phase root-rev lists.  Seed each rev's
 * phase from the roots, propagate phases from parents to children,
 * then return the rev count together with one set of revs per
 * non-public phase (slot 0 is None; the public phase is computed as a
 * difference by the Python caller).
 */
static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
{
	PyObject *roots = Py_None;
	PyObject *ret = NULL;
	PyObject *phasessize = NULL;
	PyObject *phaseroots = NULL;
	PyObject *phaseset = NULL;
	PyObject *phasessetlist = NULL;
	PyObject *rev = NULL;
	Py_ssize_t len = index_length(self) - 1;
	Py_ssize_t numphase = 0;
	Py_ssize_t minrevallphases = 0;
	Py_ssize_t minrevphase = 0;
	Py_ssize_t i = 0;
	char *phases = NULL;
	long phase;

	if (!PyArg_ParseTuple(args, "O", &roots))
		goto done;
	if (roots == NULL || !PyList_Check(roots)) {
		PyErr_SetString(PyExc_TypeError, "roots must be a list");
		goto done;
	}

	phases = calloc(len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
	if (phases == NULL) {
		PyErr_NoMemory();
		goto done;
	}
	/* Put the phase information of all the roots in phases */
	numphase = PyList_GET_SIZE(roots)+1;
	minrevallphases = len + 1;
	phasessetlist = PyList_New(numphase);
	if (phasessetlist == NULL)
		goto done;

	PyList_SET_ITEM(phasessetlist, 0, Py_None);
	Py_INCREF(Py_None);

	for (i = 0; i < numphase-1; i++) {
		phaseroots = PyList_GET_ITEM(roots, i);
		phaseset = PySet_New(NULL);
		if (phaseset == NULL)
			goto release;
		/* PyList_SET_ITEM steals the phaseset reference */
		PyList_SET_ITEM(phasessetlist, i+1, phaseset);
		if (!PyList_Check(phaseroots)) {
			PyErr_SetString(PyExc_TypeError,
					"roots item must be a list");
			goto release;
		}
		minrevphase = add_roots_get_min(self, phaseroots, i+1, phases);
		if (minrevphase == -2) /* Error from add_roots_get_min */
			goto release;
		minrevallphases = MIN(minrevallphases, minrevphase);
	}
	/* Propagate the phase information from the roots to the revs */
	if (minrevallphases != -1) {
		int parents[2];
		for (i = minrevallphases; i < len; i++) {
			if (index_get_parents(self, i, parents,
					      (int)len - 1) < 0)
				goto release;
			set_phase_from_parents(phases, parents[0], parents[1], i);
		}
	}
	/* Transform phase list to a python list */
	phasessize = PyInt_FromLong(len);
	if (phasessize == NULL)
		goto release;
	for (i = 0; i < len; i++) {
		phase = phases[i];
		/* We only store the sets of phase for non public phase, the public phase
		 * is computed as a difference */
		if (phase != 0) {
			phaseset = PyList_GET_ITEM(phasessetlist, phase);
			rev = PyInt_FromLong(i);
			if (rev == NULL)
				goto release;
			PySet_Add(phaseset, rev);
			Py_XDECREF(rev);
		}
	}
	ret = PyTuple_Pack(2, phasessize, phasessetlist);

release:
	Py_XDECREF(phasessize);
	Py_XDECREF(phasessetlist);
done:
	free(phases);
	return ret;
}
718 732
/*
 * headrevs([filteredrevs]) -> list of head revisions.
 *
 * A head is a revision with no unfiltered children.  The computed list
 * is cached on self->headrevs (valid while the same filteredrevs
 * object is passed) and a defensive copy is returned to the caller.
 */
static PyObject *index_headrevs(indexObject *self, PyObject *args)
{
	Py_ssize_t i, j, len;
	char *nothead = NULL;
	PyObject *heads = NULL;
	PyObject *filter = NULL;
	PyObject *filteredrevs = Py_None;

	if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
		return NULL;
	}

	/* cache hit: same filter object as last time */
	if (self->headrevs && filteredrevs == self->filteredrevs)
		return list_copy(self->headrevs);

	Py_DECREF(self->filteredrevs);
	self->filteredrevs = filteredrevs;
	Py_INCREF(filteredrevs);

	if (filteredrevs != Py_None) {
		filter = PyObject_GetAttrString(filteredrevs, "__contains__");
		if (!filter) {
			PyErr_SetString(PyExc_TypeError,
				"filteredrevs has no attribute __contains__");
			goto bail;
		}
	}

	len = index_length(self) - 1;
	heads = PyList_New(0);
	if (heads == NULL)
		goto bail;
	if (len == 0) {
		/* empty repo: nullrev is the only head; note this local
		 * PyObject shadows the file-level 'nullid' byte array */
		PyObject *nullid = PyInt_FromLong(-1);
		if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
			Py_XDECREF(nullid);
			goto bail;
		}
		goto done;
	}

	nothead = calloc(len, 1);
	if (nothead == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	for (i = len - 1; i >= 0; i--) {
		int isfiltered;
		int parents[2];

		/* If nothead[i] == 1, it means we've seen an unfiltered child of this
		 * node already, and therefore this node is not filtered. So we can skip
		 * the expensive check_filter step.
		 */
		if (nothead[i] != 1) {
			isfiltered = check_filter(filter, i);
			if (isfiltered == -1) {
				PyErr_SetString(PyExc_TypeError,
					"unable to check filter");
				goto bail;
			}

			if (isfiltered) {
				nothead[i] = 1;
				continue;
			}
		}

		if (index_get_parents(self, i, parents, (int)len - 1) < 0)
			goto bail;
		for (j = 0; j < 2; j++) {
			if (parents[j] >= 0)
				nothead[parents[j]] = 1;
		}
	}

	for (i = 0; i < len; i++) {
		PyObject *head;

		if (nothead[i])
			continue;
		head = PyInt_FromSsize_t(i);
		if (head == NULL || PyList_Append(heads, head) == -1) {
			Py_XDECREF(head);
			goto bail;
		}
	}

done:
	self->headrevs = heads;
	Py_XDECREF(filter);
	free(nothead);
	return list_copy(self->headrevs);
bail:
	Py_XDECREF(filter);
	Py_XDECREF(heads);
	free(nothead);
	return NULL;
}
819 833
/**
 * Obtain the base revision index entry.
 *
 * Callers must ensure that rev >= 0 or illegal memory access may occur.
 *
 * Returns the base revision, or -2 on error (exception set by
 * index_deref()).
 */
static inline int index_baserev(indexObject *self, int rev)
{
	const char *data;

	if (rev >= self->length - 1) {
		/* in-memory entry: tuple slot 3 is the base revision */
		PyObject *tuple = PyList_GET_ITEM(self->added,
						  rev - self->length + 1);
		return (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 3));
	}
	else {
		data = index_deref(self, rev);
		if (data == NULL) {
			return -2;
		}

		return getbe32(data + 16);
	}
}
843 857
844 858 static PyObject *index_deltachain(indexObject *self, PyObject *args)
845 859 {
846 860 int rev, generaldelta;
847 861 PyObject *stoparg;
848 862 int stoprev, iterrev, baserev = -1;
849 863 int stopped;
850 864 PyObject *chain = NULL, *result = NULL;
851 865 const Py_ssize_t length = index_length(self);
852 866
853 867 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
854 868 return NULL;
855 869 }
856 870
857 871 if (PyInt_Check(stoparg)) {
858 872 stoprev = (int)PyInt_AsLong(stoparg);
859 873 if (stoprev == -1 && PyErr_Occurred()) {
860 874 return NULL;
861 875 }
862 876 }
863 877 else if (stoparg == Py_None) {
864 878 stoprev = -2;
865 879 }
866 880 else {
867 881 PyErr_SetString(PyExc_ValueError,
868 882 "stoprev must be integer or None");
869 883 return NULL;
870 884 }
871 885
872 886 if (rev < 0 || rev >= length - 1) {
873 887 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
874 888 return NULL;
875 889 }
876 890
877 891 chain = PyList_New(0);
878 892 if (chain == NULL) {
879 893 return NULL;
880 894 }
881 895
882 896 baserev = index_baserev(self, rev);
883 897
884 898 /* This should never happen. */
885 899 if (baserev <= -2) {
886 900 /* Error should be set by index_deref() */
887 901 assert(PyErr_Occurred());
888 902 goto bail;
889 903 }
890 904
891 905 iterrev = rev;
892 906
893 907 while (iterrev != baserev && iterrev != stoprev) {
894 908 PyObject *value = PyInt_FromLong(iterrev);
895 909 if (value == NULL) {
896 910 goto bail;
897 911 }
898 912 if (PyList_Append(chain, value)) {
899 913 Py_DECREF(value);
900 914 goto bail;
901 915 }
902 916 Py_DECREF(value);
903 917
904 918 if (generaldelta) {
905 919 iterrev = baserev;
906 920 }
907 921 else {
908 922 iterrev--;
909 923 }
910 924
911 925 if (iterrev < 0) {
912 926 break;
913 927 }
914 928
915 929 if (iterrev >= length - 1) {
916 930 PyErr_SetString(PyExc_IndexError, "revision outside index");
917 931 return NULL;
918 932 }
919 933
920 934 baserev = index_baserev(self, iterrev);
921 935
922 936 /* This should never happen. */
923 937 if (baserev <= -2) {
924 938 /* Error should be set by index_deref() */
925 939 assert(PyErr_Occurred());
926 940 goto bail;
927 941 }
928 942 }
929 943
930 944 if (iterrev == stoprev) {
931 945 stopped = 1;
932 946 }
933 947 else {
934 948 PyObject *value = PyInt_FromLong(iterrev);
935 949 if (value == NULL) {
936 950 goto bail;
937 951 }
938 952 if (PyList_Append(chain, value)) {
939 953 Py_DECREF(value);
940 954 goto bail;
941 955 }
942 956 Py_DECREF(value);
943 957
944 958 stopped = 0;
945 959 }
946 960
947 961 if (PyList_Reverse(chain)) {
948 962 goto bail;
949 963 }
950 964
951 965 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
952 966 Py_DECREF(chain);
953 967 return result;
954 968
955 969 bail:
956 970 Py_DECREF(chain);
957 971 return NULL;
958 972 }
959 973
960 974 static inline int nt_level(const char *node, Py_ssize_t level)
961 975 {
962 976 int v = node[level>>1];
963 977 if (!(level & 1))
964 978 v >>= 4;
965 979 return v & 0xf;
966 980 }
967 981
/*
 * Look up a (possibly partial) node in the trie.
 *
 * Return values:
 *
 *   -4: match is ambiguous (multiple candidates)
 *   -2: not found
 * rest: valid rev
 */
static int nt_find(indexObject *self, const char *node, Py_ssize_t nodelen,
		   int hex)
{
	/* nybble extractor: hex digits for hex input, raw nybbles otherwise */
	int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
	int level, maxlevel, off;

	/* a full binary nullid always resolves to the null rev */
	if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
		return -1;

	if (self->nt == NULL)
		return -2;

	if (hex)
		maxlevel = nodelen > 40 ? 40 : (int)nodelen;
	else
		maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);

	for (level = off = 0; level < maxlevel; level++) {
		int k = getnybble(node, level);
		nodetree *n = &self->nt[off];
		int v = n->children[k];

		if (v < 0) {
			/* leaf: decode the rev, then verify the remaining
			 * nybbles of the query against its full node */
			const char *n;
			Py_ssize_t i;

			v = -(v + 1);
			n = index_node(self, v);
			if (n == NULL)
				return -2;
			for (i = level; i < maxlevel; i++)
				if (getnybble(node, i) != nt_level(n, i))
					return -2;
			return v;
		}
		if (v == 0)
			return -2;
		off = v;
	}
	/* multiple matches against an ambiguous prefix */
	return -4;
}
1017 1031
1018 1032 static int nt_new(indexObject *self)
1019 1033 {
1020 1034 if (self->ntlength == self->ntcapacity) {
1021 1035 if (self->ntcapacity >= INT_MAX / (sizeof(nodetree) * 2)) {
1022 1036 PyErr_SetString(PyExc_MemoryError,
1023 1037 "overflow in nt_new");
1024 1038 return -1;
1025 1039 }
1026 1040 self->ntcapacity *= 2;
1027 1041 self->nt = realloc(self->nt,
1028 1042 self->ntcapacity * sizeof(nodetree));
1029 1043 if (self->nt == NULL) {
1030 1044 PyErr_SetString(PyExc_MemoryError, "out of memory");
1031 1045 return -1;
1032 1046 }
1033 1047 memset(&self->nt[self->ntlength], 0,
1034 1048 sizeof(nodetree) * (self->ntcapacity - self->ntlength));
1035 1049 }
1036 1050 return self->ntlength++;
1037 1051 }
1038 1052
/*
 * Insert a node -> rev mapping into the trie, walking down one nybble
 * per level.  On collision with an existing leaf for a different node,
 * interior nodes are allocated until the two hashes diverge.  Returns
 * 0 on success, -1 on allocation failure or if all 40 levels are
 * exhausted.
 */
static int nt_insert(indexObject *self, const char *node, int rev)
{
	int level = 0;
	int off = 0;

	while (level < 40) {
		int k = nt_level(node, level);
		nodetree *n;
		int v;

		n = &self->nt[off];
		v = n->children[k];

		if (v == 0) {
			/* empty slot: store the leaf encoding -(rev + 1) */
			n->children[k] = -rev - 1;
			return 0;
		}
		if (v < 0) {
			/* occupied leaf: same node (or unreadable) replaces
			 * in place; different node forces a split */
			const char *oldnode = index_node(self, -(v + 1));
			int noff;

			if (!oldnode || !memcmp(oldnode, node, 20)) {
				n->children[k] = -rev - 1;
				return 0;
			}
			noff = nt_new(self);
			if (noff == -1)
				return -1;
			/* self->nt may have been changed by realloc */
			self->nt[off].children[k] = noff;
			off = noff;
			n = &self->nt[off];
			n->children[nt_level(oldnode, ++level)] = v;
			if (level > self->ntdepth)
				self->ntdepth = level;
			self->ntsplits += 1;
		} else {
			/* interior node: descend */
			level += 1;
			off = v;
		}
	}

	return -1;
}
1083 1097
/*
 * Lazily create the node->rev radix trie.
 *
 * Sizes the initial allocation from the number of index entries and
 * seeds the trie with the nullid sentinel (mapped to INT_MAX).
 * Returns 0 on success (including when the trie already exists), -1
 * with an exception set on failure.
 */
static int nt_init(indexObject *self)
{
	if (self->nt == NULL) {
		if ((size_t)self->raw_length > INT_MAX / sizeof(nodetree)) {
			PyErr_SetString(PyExc_ValueError, "overflow in nt_init");
			return -1;
		}
		/* initial capacity heuristic: raw_length / 2, minimum 4 */
		self->ntcapacity = self->raw_length < 4
			? 4 : (int)self->raw_length / 2;

		self->nt = calloc(self->ntcapacity, sizeof(nodetree));
		if (self->nt == NULL) {
			PyErr_NoMemory();
			return -1;
		}
		self->ntlength = 1;
		/* ntrev tracks how far the lazy tip-down population of
		 * the trie has progressed (see index_find_node) */
		self->ntrev = (int)index_length(self) - 1;
		self->ntlookups = 1;
		self->ntmisses = 0;
		if (nt_insert(self, nullid, INT_MAX) == -1)
			return -1;
	}
	return 0;
}
1108 1122
/*
 * Look up the rev of a binary node (or node prefix of nodelen bytes,
 * capped at 20).
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: valid rev
 */
static int index_find_node(indexObject *self,
			   const char *node, Py_ssize_t nodelen)
{
	int rev;

	self->ntlookups++;
	/* fast path: node already cached in the trie */
	rev = nt_find(self, node, nodelen, 0);
	if (rev >= -1)
		return rev;

	if (nt_init(self) == -1)
		return -3;

	/*
	 * For the first handful of lookups, we scan the entire index,
	 * and cache only the matching nodes. This optimizes for cases
	 * like "hg tip", where only a few nodes are accessed.
	 *
	 * After that, we cache every node we visit, using a single
	 * scan amortized over multiple lookups. This gives the best
	 * bulk performance, e.g. for "hg log".
	 */
	if (self->ntmisses++ < 4) {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node(self, rev);
			if (n == NULL)
				return -2;
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				/* cache only the node we were asked for */
				if (nt_insert(self, n, rev) == -1)
					return -3;
				break;
			}
		}
	} else {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node(self, rev);
			if (n == NULL) {
				self->ntrev = rev + 1;
				return -2;
			}
			/* cache every node visited by the scan */
			if (nt_insert(self, n, rev) == -1) {
				self->ntrev = rev + 1;
				return -3;
			}
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				break;
			}
		}
		/* remember how far the amortized scan has progressed */
		self->ntrev = rev;
	}

	if (rev >= 0)
		return rev;
	return -2;
}
1171 1185
1172 1186 static void raise_revlog_error(void)
1173 1187 {
1174 1188 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
1175 1189
1176 1190 mod = PyImport_ImportModule("mercurial.error");
1177 1191 if (mod == NULL) {
1178 1192 goto cleanup;
1179 1193 }
1180 1194
1181 1195 dict = PyModule_GetDict(mod);
1182 1196 if (dict == NULL) {
1183 1197 goto cleanup;
1184 1198 }
1185 1199 Py_INCREF(dict);
1186 1200
1187 1201 errclass = PyDict_GetItemString(dict, "RevlogError");
1188 1202 if (errclass == NULL) {
1189 1203 PyErr_SetString(PyExc_SystemError,
1190 1204 "could not find RevlogError");
1191 1205 goto cleanup;
1192 1206 }
1193 1207
1194 1208 /* value of exception is ignored by callers */
1195 1209 PyErr_SetString(errclass, "RevlogError");
1196 1210
1197 1211 cleanup:
1198 1212 Py_XDECREF(dict);
1199 1213 Py_XDECREF(mod);
1200 1214 }
1201 1215
/*
 * __getitem__: an integer key returns the index entry tuple for that
 * rev; a binary node key returns its rev.  Raises RevlogError for an
 * unknown node.
 */
static PyObject *index_getitem(indexObject *self, PyObject *value)
{
	char *node;
	Py_ssize_t nodelen;
	int rev;

	if (PyInt_Check(value))
		return index_get(self, PyInt_AS_LONG(value));

	if (node_check(value, &node, &nodelen) == -1)
		return NULL;
	rev = index_find_node(self, node, nodelen);
	if (rev >= -1)
		return PyInt_FromLong(rev);
	if (rev == -2)
		raise_revlog_error();
	/* rev == -3: exception already set by index_find_node */
	return NULL;
}
1220 1234
/*
 * Resolve a (possibly partial) hex node prefix to a rev.
 *
 * The radix tree is fully populated first so that prefix ambiguity
 * can be detected reliably.  Return values follow nt_find: a rev on
 * success, -2 not found, -3 error (exception set), -4 ambiguous.
 */
static int nt_partialmatch(indexObject *self, const char *node,
			   Py_ssize_t nodelen)
{
	int rev;

	if (nt_init(self) == -1)
		return -3;

	if (self->ntrev > 0) {
		/* ensure that the radix tree is fully populated */
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node(self, rev);
			if (n == NULL)
				return -2;
			if (nt_insert(self, n, rev) == -1)
				return -3;
		}
		/* rev is -1 here: nothing left to populate lazily */
		self->ntrev = rev;
	}

	return nt_find(self, node, nodelen, 1);
}
1243 1257
/*
 * index.partialmatch(hexprefix) -> 20-byte binary node or None.
 *
 * Returns None when the prefix contains non-hex characters or matches
 * no node; raises RevlogError on an ambiguous prefix.
 */
static PyObject *index_partialmatch(indexObject *self, PyObject *args)
{
	const char *fullnode;
	int nodelen;
	char *node;
	int rev, i;

	if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
		return NULL;

	if (nodelen < 1) {
		PyErr_SetString(PyExc_ValueError, "key too short");
		return NULL;
	}

	if (nodelen > 40) {
		PyErr_SetString(PyExc_ValueError, "key too long");
		return NULL;
	}

	/* validate hex input; hexdigit sets an exception on bad chars */
	for (i = 0; i < nodelen; i++)
		hexdigit(node, i);
	if (PyErr_Occurred()) {
		/* input contains non-hex characters */
		PyErr_Clear();
		Py_RETURN_NONE;
	}

	rev = nt_partialmatch(self, node, nodelen);

	switch (rev) {
	case -4:
		raise_revlog_error();
		/* fallthrough: return NULL with the exception set */
	case -3:
		return NULL;
	case -2:
		Py_RETURN_NONE;
	case -1:
		return PyBytes_FromStringAndSize(nullid, 20);
	}

	fullnode = index_node_existing(self, rev);
	if (fullnode == NULL) {
		/* NOTE(review): assumes index_node_existing sets an
		 * exception on failure — confirm against its definition */
		return NULL;
	}
	return PyBytes_FromStringAndSize(fullnode, 20);
}
1293 1305
1294 1306 static PyObject *index_m_get(indexObject *self, PyObject *args)
1295 1307 {
1296 1308 Py_ssize_t nodelen;
1297 1309 PyObject *val;
1298 1310 char *node;
1299 1311 int rev;
1300 1312
1301 1313 if (!PyArg_ParseTuple(args, "O", &val))
1302 1314 return NULL;
1303 1315 if (node_check(val, &node, &nodelen) == -1)
1304 1316 return NULL;
1305 1317 rev = index_find_node(self, node, nodelen);
1306 1318 if (rev == -3)
1307 1319 return NULL;
1308 1320 if (rev == -2)
1309 1321 Py_RETURN_NONE;
1310 1322 return PyInt_FromLong(rev);
1311 1323 }
1312 1324
1313 1325 static int index_contains(indexObject *self, PyObject *value)
1314 1326 {
1315 1327 char *node;
1316 1328 Py_ssize_t nodelen;
1317 1329
1318 1330 if (PyInt_Check(value)) {
1319 1331 long rev = PyInt_AS_LONG(value);
1320 1332 return rev >= -1 && rev < index_length(self);
1321 1333 }
1322 1334
1323 1335 if (node_check(value, &node, &nodelen) == -1)
1324 1336 return -1;
1325 1337
1326 1338 switch (index_find_node(self, node, nodelen)) {
1327 1339 case -3:
1328 1340 return -1;
1329 1341 case -2:
1330 1342 return 0;
1331 1343 default:
1332 1344 return 1;
1333 1345 }
1334 1346 }
1335 1347
/* one bit per input rev, plus one "poison" bit; caps inputs at 63 */
typedef uint64_t bitmask;

/*
 * Given a disjoint set of revs, return all candidates for the
 * greatest common ancestor. In revset notation, this is the set
 * "heads(::a and ::b and ...)"
 *
 * Walks from the highest input rev toward the root, propagating a
 * bitmask of "which inputs can reach this rev" to each parent.  A rev
 * whose mask covers all inputs is a candidate; the poison bit marks
 * its ancestors so they are not reported as well.
 */
static PyObject *find_gca_candidates(indexObject *self, const int *revs,
				     int revcount)
{
	const bitmask allseen = (1ull << revcount) - 1;
	const bitmask poison = 1ull << revcount;
	PyObject *gca = PyList_New(0);
	int i, v, interesting;
	int maxrev = -1;
	bitmask sp;
	bitmask *seen;

	if (gca == NULL)
		return PyErr_NoMemory();

	for (i = 0; i < revcount; i++) {
		if (revs[i] > maxrev)
			maxrev = revs[i];
	}

	/* seen[r]: bitmask of input revs that can reach rev r */
	seen = calloc(sizeof(*seen), maxrev + 1);
	if (seen == NULL) {
		Py_DECREF(gca);
		return PyErr_NoMemory();
	}

	for (i = 0; i < revcount; i++)
		seen[revs[i]] = 1ull << i;

	/* number of live, non-poisoned masks still being propagated */
	interesting = revcount;

	for (v = maxrev; v >= 0 && interesting; v--) {
		bitmask sv = seen[v];
		int parents[2];

		if (!sv)
			continue;

		if (sv < poison) {
			interesting -= 1;
			if (sv == allseen) {
				/* reachable from every input: candidate */
				PyObject *obj = PyInt_FromLong(v);
				if (obj == NULL)
					goto bail;
				if (PyList_Append(gca, obj) == -1) {
					Py_DECREF(obj);
					goto bail;
				}
				sv |= poison;
				for (i = 0; i < revcount; i++) {
					if (revs[i] == v)
						goto done;
				}
			}
		}
		if (index_get_parents(self, v, parents, maxrev) < 0)
			goto bail;

		for (i = 0; i < 2; i++) {
			int p = parents[i];
			if (p == -1)
				continue;
			sp = seen[p];
			if (sv < poison) {
				if (sp == 0) {
					seen[p] = sv;
					interesting++;
				}
				else if (sp != sv)
					seen[p] |= sv;
			} else {
				/* poison ancestors of found candidates */
				if (sp && sp < poison)
					interesting--;
				seen[p] = sv;
			}
		}
	}

done:
	free(seen);
	return gca;
bail:
	free(seen);
	Py_XDECREF(gca);
	return NULL;
}
1428 1440
/*
 * Given a disjoint set of revs, return the subset with the longest
 * path to the root.
 *
 * Walks from the highest rev toward the root tracking, per rev, its
 * depth and a bitmask of which inputs dominate it; "interesting"
 * counts live masks so the walk can stop early.
 */
static PyObject *find_deepest(indexObject *self, PyObject *revs)
{
	const Py_ssize_t revcount = PyList_GET_SIZE(revs);
	static const Py_ssize_t capacity = 24;
	int *depth, *interesting = NULL;
	int i, j, v, ninteresting;
	PyObject *dict = NULL, *keys = NULL;
	long *seen = NULL;
	int maxrev = -1;
	long final;

	if (revcount > capacity) {
		PyErr_Format(PyExc_OverflowError,
			     "bitset size (%ld) > capacity (%ld)",
			     (long)revcount, (long)capacity);
		return NULL;
	}

	for (i = 0; i < revcount; i++) {
		int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
		if (n > maxrev)
			maxrev = n;
	}

	depth = calloc(sizeof(*depth), maxrev + 1);
	if (depth == NULL)
		return PyErr_NoMemory();

	seen = calloc(sizeof(*seen), maxrev + 1);
	if (seen == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* interesting is indexed by seen-bitmask value */
	interesting = calloc(sizeof(*interesting), 1 << revcount);
	if (interesting == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	if (PyList_Sort(revs) == -1)
		goto bail;

	for (i = 0; i < revcount; i++) {
		int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
		long b = 1l << i;
		depth[n] = 1;
		seen[n] = b;
		interesting[b] = 1;
	}

	/* invariant: ninteresting is the number of non-zero entries in
	 * interesting. */
	ninteresting = (int)revcount;

	for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
		int dv = depth[v];
		int parents[2];
		long sv;

		if (dv == 0)
			continue;

		sv = seen[v];
		if (index_get_parents(self, v, parents, maxrev) < 0)
			goto bail;

		for (i = 0; i < 2; i++) {
			int p = parents[i];
			long sp;
			int dp;

			if (p == -1)
				continue;

			dp = depth[p];
			sp = seen[p];
			if (dp <= dv) {
				/* parent is shallower: deepen it and
				 * take over its ownership mask */
				depth[p] = dv + 1;
				if (sp != sv) {
					interesting[sv] += 1;
					seen[p] = sv;
					if (sp) {
						interesting[sp] -= 1;
						if (interesting[sp] == 0)
							ninteresting -= 1;
					}
				}
			}
			else if (dv == dp - 1) {
				/* same frontier: merge masks */
				long nsp = sp | sv;
				if (nsp == sp)
					continue;
				seen[p] = nsp;
				interesting[sp] -= 1;
				if (interesting[sp] == 0)
					ninteresting -= 1;
				if (interesting[nsp] == 0)
					ninteresting += 1;
				interesting[nsp] += 1;
			}
		}
		interesting[sv] -= 1;
		if (interesting[sv] == 0)
			ninteresting -= 1;
	}

	/* collect the masks of the remaining interesting sets */
	final = 0;
	j = ninteresting;
	/* NOTE(review): the bound (2 << revcount) exceeds the
	 * (1 << revcount) allocation; all non-zero entries live below
	 * 1 << revcount so "j > 0" should stop the scan in range, but
	 * this relies on the ninteresting invariant — worth confirming */
	for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
		if (interesting[i] == 0)
			continue;
		final |= i;
		j -= 1;
	}
	if (final == 0) {
		keys = PyList_New(0);
		goto bail;
	}

	dict = PyDict_New();
	if (dict == NULL)
		goto bail;

	for (i = 0; i < revcount; i++) {
		PyObject *key;

		if ((final & (1 << i)) == 0)
			continue;

		key = PyList_GET_ITEM(revs, i);
		Py_INCREF(key);
		Py_INCREF(Py_None);
		if (PyDict_SetItem(dict, key, Py_None) == -1) {
			Py_DECREF(key);
			Py_DECREF(Py_None);
			goto bail;
		}
	}

	keys = PyDict_Keys(dict);

bail:
	free(depth);
	free(seen);
	free(interesting);
	Py_XDECREF(dict);

	return keys;
}
1583 1595
/*
 * Given a (possibly overlapping) set of revs, return all the
 * common ancestors heads: heads(::args[0] and ::a[1] and ...)
 *
 * Validates and deduplicates the arguments, then delegates to
 * find_gca_candidates.  Returns a list of revs, or NULL with an
 * exception set.
 */
static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
{
	PyObject *ret = NULL;
	Py_ssize_t argcount, i, len;
	bitmask repeat = 0;
	int revcount = 0;
	int *revs;

	argcount = PySequence_Length(args);
	revs = PyMem_Malloc(argcount * sizeof(*revs));
	if (argcount > 0 && revs == NULL)
		return PyErr_NoMemory();
	/* exclude the nullid sentinel from the valid-rev range */
	len = index_length(self) - 1;

	for (i = 0; i < argcount; i++) {
		static const int capacity = 24;
		PyObject *obj = PySequence_GetItem(args, i);
		bitmask x;
		long val;

		if (!PyInt_Check(obj)) {
			PyErr_SetString(PyExc_TypeError,
					"arguments must all be ints");
			Py_DECREF(obj);
			goto bail;
		}
		val = PyInt_AsLong(obj);
		Py_DECREF(obj);
		if (val == -1) {
			/* nullrev: the only common ancestor is null */
			ret = PyList_New(0);
			goto done;
		}
		if (val < 0 || val >= len) {
			PyErr_SetString(PyExc_IndexError,
					"index out of range");
			goto bail;
		}
		/* this cheesy bloom filter lets us avoid some more
		 * expensive duplicate checks in the common set-is-disjoint
		 * case */
		x = 1ull << (val & 0x3f);
		if (repeat & x) {
			int k;
			for (k = 0; k < revcount; k++) {
				if (val == revs[k])
					goto duplicate;
			}
		}
		else repeat |= x;
		if (revcount >= capacity) {
			PyErr_Format(PyExc_OverflowError,
				     "bitset size (%d) > capacity (%d)",
				     revcount, capacity);
			goto bail;
		}
		revs[revcount++] = (int)val;
	duplicate:;
	}

	if (revcount == 0) {
		ret = PyList_New(0);
		goto done;
	}
	if (revcount == 1) {
		/* single rev is its own common-ancestor head */
		PyObject *obj;
		ret = PyList_New(1);
		if (ret == NULL)
			goto bail;
		obj = PyInt_FromLong(revs[0]);
		if (obj == NULL)
			goto bail;
		PyList_SET_ITEM(ret, 0, obj);
		goto done;
	}

	ret = find_gca_candidates(self, revs, revcount);
	if (ret == NULL)
		goto bail;

done:
	PyMem_Free(revs);
	return ret;

bail:
	PyMem_Free(revs);
	Py_XDECREF(ret);
	return NULL;
}
1676 1688
1677 1689 /*
1678 1690 * Given a (possibly overlapping) set of revs, return the greatest
1679 1691 * common ancestors: those with the longest path to the root.
1680 1692 */
1681 1693 static PyObject *index_ancestors(indexObject *self, PyObject *args)
1682 1694 {
1683 1695 PyObject *ret;
1684 1696 PyObject *gca = index_commonancestorsheads(self, args);
1685 1697 if (gca == NULL)
1686 1698 return NULL;
1687 1699
1688 1700 if (PyList_GET_SIZE(gca) <= 1) {
1689 1701 return gca;
1690 1702 }
1691 1703
1692 1704 ret = find_deepest(self, gca);
1693 1705 Py_DECREF(gca);
1694 1706 return ret;
1695 1707 }
1696 1708
1697 1709 /*
1698 1710 * Invalidate any trie entries introduced by added revs.
1699 1711 */
1700 1712 static void nt_invalidate_added(indexObject *self, Py_ssize_t start)
1701 1713 {
1702 1714 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
1703 1715
1704 1716 for (i = start; i < len; i++) {
1705 1717 PyObject *tuple = PyList_GET_ITEM(self->added, i);
1706 1718 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
1707 1719
1708 1720 nt_insert(self, PyBytes_AS_STRING(node), -1);
1709 1721 }
1710 1722
1711 1723 if (start == 0)
1712 1724 Py_CLEAR(self->added);
1713 1725 }
1714 1726
/*
 * Delete a numeric range of revs, which must be at the end of the
 * range, but exclude the sentinel nullid entry.
 *
 * Invalidates the corresponding trie entries and caches; truncates
 * self->added when the deletion reaches into appended entries.
 * Returns 0 on success, -1 with an exception set otherwise.
 */
static int index_slice_del(indexObject *self, PyObject *item)
{
	Py_ssize_t start, stop, step, slicelength;
	Py_ssize_t length = index_length(self);
	int ret = 0;

	/* Argument changed from PySliceObject* to PyObject* in Python 3. */
#ifdef IS_PY3K
	if (PySlice_GetIndicesEx(item, length,
#else
	if (PySlice_GetIndicesEx((PySliceObject*)item, length,
#endif
				 &start, &stop, &step, &slicelength) < 0)
		return -1;

	if (slicelength <= 0)
		return 0;

	if ((step < 0 && start < stop) || (step > 0 && start > stop))
		stop = start;

	/* normalize a negative step to a forward range */
	if (step < 0) {
		stop = start + 1;
		start = stop + step*(slicelength - 1) - 1;
		step = -step;
	}

	if (step != 1) {
		PyErr_SetString(PyExc_ValueError,
				"revlog index delete requires step size of 1");
		return -1;
	}

	/* only tail deletion (up to, but excluding, nullid) is valid */
	if (stop != length - 1) {
		PyErr_SetString(PyExc_IndexError,
				"revlog index deletion indices are invalid");
		return -1;
	}

	if (start < self->length - 1) {
		/* deletion reaches into the on-disk portion */
		if (self->nt) {
			Py_ssize_t i;

			for (i = start + 1; i < self->length - 1; i++) {
				const char *node = index_node(self, i);

				if (node)
					nt_insert(self, node, -1);
			}
			if (self->added)
				nt_invalidate_added(self, 0);
			if (self->ntrev > start)
				self->ntrev = (int)start;
		}
		self->length = start + 1;
		if (start < self->raw_length) {
			/* drop cached entry tuples for deleted revs */
			if (self->cache) {
				Py_ssize_t i;
				for (i = start; i < self->raw_length; i++)
					Py_CLEAR(self->cache[i]);
			}
			self->raw_length = start;
		}
		goto done;
	}

	/* deletion only affects appended (self->added) entries */
	if (self->nt) {
		nt_invalidate_added(self, start - self->length + 1);
		if (self->ntrev > start)
			self->ntrev = (int)start;
	}
	if (self->added)
		ret = PyList_SetSlice(self->added, start - self->length + 1,
				      PyList_GET_SIZE(self->added), NULL);
done:
	Py_CLEAR(self->headrevs);
	return ret;
}
1797 1809
/*
 * Supported ops:
 *
 * slice deletion
 * string assignment (extend node->rev mapping)
 * string deletion (shrink node->rev mapping)
 *
 * Returns 0 on success, -1 with an exception set on failure.
 */
static int index_assign_subscript(indexObject *self, PyObject *item,
				  PyObject *value)
{
	char *node;
	Py_ssize_t nodelen;
	long rev;

	if (PySlice_Check(item) && value == NULL)
		return index_slice_del(self, item);

	if (node_check(item, &node, &nodelen) == -1)
		return -1;

	if (value == NULL)
		/* del index[node]: mark the trie entry dead (no-op if
		 * the trie was never built) */
		return self->nt ? nt_insert(self, node, -1) : 0;
	rev = PyInt_AsLong(value);
	if (rev > INT_MAX || rev < 0) {
		if (!PyErr_Occurred())
			PyErr_SetString(PyExc_ValueError, "rev out of range");
		return -1;
	}

	if (nt_init(self) == -1)
		return -1;
	return nt_insert(self, node, (int)rev);
}
1831 1843
/*
 * Find all RevlogNG entries in an index that has inline data. Update
 * the optional "offsets" table with those entries.
 *
 * Returns the number of entries found, or -1 with an exception set
 * when the buffer does not parse cleanly.
 */
static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
{
	const char *data = (const char *)self->buf.buf;
	Py_ssize_t pos = 0;
	Py_ssize_t end = self->buf.len;
	long incr = v1_hdrsize;
	Py_ssize_t len = 0;

	while (pos + v1_hdrsize <= end && pos >= 0) {
		uint32_t comp_len;
		/* 3rd element of header is length of compressed inline data */
		comp_len = getbe32(data + pos + 8);
		/* each record is a fixed header plus its inline data */
		incr = v1_hdrsize + comp_len;
		if (offsets)
			offsets[len] = data + pos;
		len++;
		pos += incr;
	}

	/* a well-formed index ends exactly at the buffer boundary */
	if (pos != end) {
		if (!PyErr_Occurred())
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
		return -1;
	}

	return len;
}
1863 1875
/*
 * tp_init: index(data, inlined).
 *
 * "data" must support the buffer protocol; "inlined" selects between
 * inline (data interleaved) and plain index layout.  Returns 0 on
 * success, -1 with an exception set on failure.
 */
static int index_init(indexObject *self, PyObject *args)
{
	PyObject *data_obj, *inlined_obj;
	Py_ssize_t size;

	/* Initialize before argument-checking to avoid index_dealloc() crash. */
	self->raw_length = 0;
	self->added = NULL;
	self->cache = NULL;
	self->data = NULL;
	memset(&self->buf, 0, sizeof(self->buf));
	self->headrevs = NULL;
	self->filteredrevs = Py_None;
	Py_INCREF(Py_None);
	self->nt = NULL;
	self->offsets = NULL;

	if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
		return -1;
	if (!PyObject_CheckBuffer(data_obj)) {
		PyErr_SetString(PyExc_TypeError,
				"data does not support buffer interface");
		return -1;
	}

	if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
		return -1;
	size = self->buf.len;

	self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
	self->data = data_obj;

	self->ntlength = self->ntcapacity = 0;
	self->ntdepth = self->ntsplits = 0;
	self->ntlookups = self->ntmisses = 0;
	self->ntrev = -1;
	Py_INCREF(self->data);

	if (self->inlined) {
		/* inline layout: record count requires a full scan */
		Py_ssize_t len = inline_scan(self, NULL);
		if (len == -1)
			goto bail;
		self->raw_length = len;
		/* +1 for the nullid sentinel entry */
		self->length = len + 1;
	} else {
		if (size % v1_hdrsize) {
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
			goto bail;
		}
		self->raw_length = size / v1_hdrsize;
		self->length = self->raw_length + 1;
	}

	return 0;
bail:
	/* acquired resources are released by index_dealloc */
	return -1;
}
1921 1933
/* The "nodemap" attribute: the index itself serves as the node->rev
 * mapping, so return a new reference to self. */
static PyObject *index_nodemap(indexObject *self)
{
	Py_INCREF(self);
	return (PyObject *)self;
}
1927 1939
/* tp_dealloc: release caches, the data buffer, and owned references.
 * Must tolerate partially-initialized objects (see index_init). */
static void index_dealloc(indexObject *self)
{
	_index_clearcaches(self);
	Py_XDECREF(self->filteredrevs);
	if (self->buf.buf) {
		PyBuffer_Release(&self->buf);
		memset(&self->buf, 0, sizeof(self->buf));
	}
	Py_XDECREF(self->data);
	Py_XDECREF(self->added);
	PyObject_Del(self);
}
1940 1952
/* sequence protocol: len(index), index[rev], rev/node in index */
static PySequenceMethods index_sequence_methods = {
	(lenfunc)index_length,      /* sq_length */
	0,                          /* sq_concat */
	0,                          /* sq_repeat */
	(ssizeargfunc)index_get,    /* sq_item */
	0,                          /* sq_slice */
	0,                          /* sq_ass_item */
	0,                          /* sq_ass_slice */
	(objobjproc)index_contains, /* sq_contains */
};
1951 1963
/* mapping protocol: node-keyed lookup, assignment and slice deletion */
static PyMappingMethods index_mapping_methods = {
	(lenfunc)index_length,                 /* mp_length */
	(binaryfunc)index_getitem,             /* mp_subscript */
	(objobjargproc)index_assign_subscript, /* mp_ass_subscript */
};
1957 1969
/* methods exposed on parsers.index instances */
static PyMethodDef index_methods[] = {
	{"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
	 "return the gca set of the given revs"},
	{"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
	  METH_VARARGS,
	  "return the heads of the common ancestors of the given revs"},
	{"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
	 "clear the index caches"},
	{"get", (PyCFunction)index_m_get, METH_VARARGS,
	 "get an index entry"},
	{"computephasesmapsets", (PyCFunction)compute_phases_map_sets,
			METH_VARARGS, "compute phases"},
	{"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
		"reachableroots"},
	{"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
	 "get head revisions"}, /* Can do filtering since 3.2 */
	{"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
	 "get filtered head revisions"}, /* Can always do filtering */
	{"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
	 "determine revisions with deltas to reconstruct fulltext"},
	{"insert", (PyCFunction)index_insert, METH_VARARGS,
	 "insert an index entry"},
	{"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
	 "match a potentially ambiguous node ID"},
	{"stats", (PyCFunction)index_stats, METH_NOARGS,
	 "stats for the index"},
	{NULL} /* Sentinel */
};
1986 1998
/* attribute descriptors: "nodemap" is a computed property */
static PyGetSetDef index_getset[] = {
	{"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
	{NULL} /* Sentinel */
};
1991 2003
/* the parsers.index extension type */
static PyTypeObject indexType = {
	PyVarObject_HEAD_INIT(NULL, 0) /* header */
	"parsers.index",           /* tp_name */
	sizeof(indexObject),       /* tp_basicsize */
	0,                         /* tp_itemsize */
	(destructor)index_dealloc, /* tp_dealloc */
	0,                         /* tp_print */
	0,                         /* tp_getattr */
	0,                         /* tp_setattr */
	0,                         /* tp_compare */
	0,                         /* tp_repr */
	0,                         /* tp_as_number */
	&index_sequence_methods,   /* tp_as_sequence */
	&index_mapping_methods,    /* tp_as_mapping */
	0,                         /* tp_hash */
	0,                         /* tp_call */
	0,                         /* tp_str */
	0,                         /* tp_getattro */
	0,                         /* tp_setattro */
	0,                         /* tp_as_buffer */
	Py_TPFLAGS_DEFAULT,        /* tp_flags */
	"revlog index",            /* tp_doc */
	0,                         /* tp_traverse */
	0,                         /* tp_clear */
	0,                         /* tp_richcompare */
	0,                         /* tp_weaklistoffset */
	0,                         /* tp_iter */
	0,                         /* tp_iternext */
	index_methods,             /* tp_methods */
	0,                         /* tp_members */
	index_getset,              /* tp_getset */
	0,                         /* tp_base */
	0,                         /* tp_dict */
	0,                         /* tp_descr_get */
	0,                         /* tp_descr_set */
	0,                         /* tp_dictoffset */
	(initproc)index_init,      /* tp_init */
	0,                         /* tp_alloc */
};
2031 2043
/*
 * returns a tuple of the form (index, index, cache) with elements as
 * follows:
 *
 * index: an index object that lazily parses RevlogNG records
 * cache: if data is inlined, a tuple (0, index_file_content), else None
 *   index_file_content could be a string, or a buffer
 *
 * added complications are for backwards compatibility
 */
PyObject *parse_index2(PyObject *self, PyObject *args)
{
	PyObject *tuple = NULL, *cache = NULL;
	indexObject *idx;
	int ret;

	idx = PyObject_New(indexObject, &indexType);
	if (idx == NULL)
		goto bail;

	ret = index_init(idx, args);
	if (ret == -1)
		goto bail;

	if (idx->inlined) {
		cache = Py_BuildValue("iO", 0, idx->data);
		if (cache == NULL)
			goto bail;
	} else {
		cache = Py_None;
		Py_INCREF(cache);
	}

	/* "NN" steals both references on success */
	tuple = Py_BuildValue("NN", idx, cache);
	if (!tuple)
		goto bail;
	return tuple;

bail:
	Py_XDECREF(idx);
	Py_XDECREF(cache);
	Py_XDECREF(tuple);
	return NULL;
}
2076 2088
/* Register the index type and the shared nullid entry tuple on the
 * parsers module. */
void revlog_module_init(PyObject *mod)
{
	indexType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&indexType) < 0)
		return;
	Py_INCREF(&indexType);
	/* NOTE(review): PyModule_AddObject return value is unchecked;
	 * on failure the indexType reference would leak — confirm this
	 * is acceptable at module init */
	PyModule_AddObject(mod, "index", (PyObject *)&indexType);

	nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0,
				  -1, -1, -1, -1, nullid, 20);
	if (nullentry)
		/* immortal constant: exempt from GC tracking */
		PyObject_GC_UnTrack(nullentry);
}
General Comments 0
You need to be logged in to leave comments. Login now