revlog: use literal -1 instead of variable that always has that value...
Martin von Zweigbergk
r37949:89259247 default
@@ -1,2105 +1,2105 @@
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #include <Python.h>
11 11 #include <assert.h>
12 12 #include <ctype.h>
13 13 #include <stddef.h>
14 14 #include <string.h>
15 15
16 16 #include "bitmanipulation.h"
17 17 #include "charencode.h"
18 18 #include "util.h"
19 19
20 20 #ifdef IS_PY3K
21 21 /* The mapping of Python types is meant to be temporary to get Python
22 22 * 3 to compile. We should remove this once Python 3 is fully
23 23 * supported and proper types are used in the extensions themselves. */
24 24 #define PyInt_Check PyLong_Check
25 25 #define PyInt_FromLong PyLong_FromLong
26 26 #define PyInt_FromSsize_t PyLong_FromSsize_t
27 27 #define PyInt_AS_LONG PyLong_AS_LONG
28 28 #define PyInt_AsLong PyLong_AsLong
29 29 #endif
30 30
31 31 /*
32 32 * A base-16 trie for fast node->rev mapping.
33 33 *
34 34 * Positive value is index of the next node in the trie
35 35 * Negative value is a leaf: -(rev + 1)
36 36 * Zero is empty
37 37 */
38 38 typedef struct {
39 39 int children[16];
40 40 } nodetree;
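/*
 * Illustrative note (not part of the upstream source): with the encoding
 * above, a child slot of 0 is empty, a positive slot such as 3 points to
 * the nodetree at index 3, and a negative slot such as -6 is a leaf for
 * rev 5, since -(5 + 1) == -6.
 */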
41 41
42 42 /*
43 43 * This class has two behaviors.
44 44 *
45 45 * When used in a list-like way (with integer keys), we decode an
46 46 * entry in a RevlogNG index file on demand. Our last entry is a
47 47 * sentinel, always a nullid. We have limited support for
48 48 * integer-keyed insert and delete, only at elements right before the
49 49 * sentinel.
50 50 *
51 51 * With string keys, we lazily perform a reverse mapping from node to
52 52 * rev, using a base-16 trie.
53 53 */
54 54 typedef struct {
55 55 PyObject_HEAD
56 56 /* Type-specific fields go here. */
57 57 PyObject *data; /* raw bytes of index */
58 58 Py_buffer buf; /* buffer of data */
59 59 PyObject **cache; /* cached tuples */
60 60 const char **offsets; /* populated on demand */
61 61 Py_ssize_t raw_length; /* original number of elements */
62 62 Py_ssize_t length; /* current number of elements */
63 63 PyObject *added; /* populated on demand */
64 64 PyObject *headrevs; /* cache, invalidated on changes */
65 65 PyObject *filteredrevs;/* filtered revs set */
66 66 nodetree *nt; /* base-16 trie */
67 67 unsigned ntlength; /* # nodes in use */
68 68 unsigned ntcapacity; /* # nodes allocated */
69 69 int ntdepth; /* maximum depth of tree */
70 70 int ntsplits; /* # splits performed */
71 71 int ntrev; /* last rev scanned */
72 72 int ntlookups; /* # lookups */
73 73 int ntmisses; /* # lookups that miss the cache */
74 74 int inlined;
75 75 } indexObject;
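/*
 * Illustrative sketch of the two behaviours described above, as seen from
 * Python (assumed usage, not part of the upstream source):
 *
 *     index[5]      # list-like: decode entry 5 into an 8-tuple on demand
 *     index[node]   # dict-like: 20-byte node -> rev via the base-16 trie
 */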
76 76
77 77 static Py_ssize_t index_length(const indexObject *self)
78 78 {
79 79 if (self->added == NULL)
80 80 return self->length;
81 81 return self->length + PyList_GET_SIZE(self->added);
82 82 }
83 83
84 84 static PyObject *nullentry;
85 85 static const char nullid[20];
86 86
87 87 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
88 88
89 89 #if LONG_MAX == 0x7fffffffL
90 90 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
91 91 #else
92 92 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
93 93 #endif
94 94
95 95 /* A RevlogNG v1 index entry is 64 bytes long. */
96 96 static const long v1_hdrsize = 64;
97 97
98 98 /*
99 99 * Return a pointer to the beginning of a RevlogNG record.
100 100 */
101 101 static const char *index_deref(indexObject *self, Py_ssize_t pos)
102 102 {
103 103 if (self->inlined && pos > 0) {
104 104 if (self->offsets == NULL) {
105 105 self->offsets = PyMem_Malloc(self->raw_length *
106 106 sizeof(*self->offsets));
107 107 if (self->offsets == NULL)
108 108 return (const char *)PyErr_NoMemory();
109 109 inline_scan(self, self->offsets);
110 110 }
111 111 return self->offsets[pos];
112 112 }
113 113
114 114 return (const char *)(self->buf.buf) + pos * v1_hdrsize;
115 115 }
116 116
117 117 static inline int index_get_parents(indexObject *self, Py_ssize_t rev,
118 118 int *ps, int maxrev)
119 119 {
120 120 if (rev >= self->length - 1) {
121 121 PyObject *tuple = PyList_GET_ITEM(self->added,
122 122 rev - self->length + 1);
123 123 ps[0] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 5));
124 124 ps[1] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 6));
125 125 } else {
126 126 const char *data = index_deref(self, rev);
127 127 ps[0] = getbe32(data + 24);
128 128 ps[1] = getbe32(data + 28);
129 129 }
130 130 /* If the index file is corrupted, ps[] may point to invalid revisions,
131 131 * so trusting them unconditionally risks a buffer overflow. */
132 132 if (ps[0] > maxrev || ps[1] > maxrev) {
133 133 PyErr_SetString(PyExc_ValueError, "parent out of range");
134 134 return -1;
135 135 }
136 136 return 0;
137 137 }
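/*
 * Illustrative caller sketch (an assumption mirroring the uses further down
 * in this file, not part of the upstream source): the function fills a
 * two-element array and returns 0, or returns -1 with ValueError already
 * set when a parent is out of range, so callers just bail:
 *
 *     int parents[2];
 *     if (index_get_parents(self, rev, parents, (int)len - 1) < 0)
 *             goto bail;
 */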
138 138
139 139
140 140 /*
141 141 * RevlogNG format (all in big endian, data may be inlined):
142 142 * 6 bytes: offset
143 143 * 2 bytes: flags
144 144 * 4 bytes: compressed length
145 145 * 4 bytes: uncompressed length
146 146 * 4 bytes: base revision
147 147 * 4 bytes: link revision
148 148 * 4 bytes: parent 1 revision
149 149 * 4 bytes: parent 2 revision
150 150 * 32 bytes: nodeid (only 20 bytes used)
151 151 */
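/*
 * Byte layout of one 64-byte entry implied by the list above (explanatory
 * note, not part of the upstream source): bytes 0-5 offset, 6-7 flags
 * (decoded together into the 64-bit offset_flags value), 8-11 compressed
 * length, 12-15 uncompressed length, 16-19 base rev, 20-23 link rev,
 * 24-27 parent 1, 28-31 parent 2, 32-51 nodeid, 52-63 unused padding.
 * For the first entry the offset field doubles as the revlog version
 * header, which index_get() below masks out.
 */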
152 152 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
153 153 {
154 154 uint64_t offset_flags;
155 155 int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
156 156 const char *c_node_id;
157 157 const char *data;
158 158 Py_ssize_t length = index_length(self);
159 159 PyObject *entry;
160 160
161 161 if (pos < 0)
162 162 pos += length;
163 163
164 164 if (pos < 0 || pos >= length) {
165 165 PyErr_SetString(PyExc_IndexError, "revlog index out of range");
166 166 return NULL;
167 167 }
168 168
169 169 if (pos == length - 1) {
170 170 Py_INCREF(nullentry);
171 171 return nullentry;
172 172 }
173 173
174 174 if (pos >= self->length - 1) {
175 175 PyObject *obj;
176 176 obj = PyList_GET_ITEM(self->added, pos - self->length + 1);
177 177 Py_INCREF(obj);
178 178 return obj;
179 179 }
180 180
181 181 if (self->cache) {
182 182 if (self->cache[pos]) {
183 183 Py_INCREF(self->cache[pos]);
184 184 return self->cache[pos];
185 185 }
186 186 } else {
187 187 self->cache = calloc(self->raw_length, sizeof(PyObject *));
188 188 if (self->cache == NULL)
189 189 return PyErr_NoMemory();
190 190 }
191 191
192 192 data = index_deref(self, pos);
193 193 if (data == NULL)
194 194 return NULL;
195 195
196 196 offset_flags = getbe32(data + 4);
197 197 if (pos == 0) /* mask out version number for the first entry */
198 198 offset_flags &= 0xFFFF;
199 199 else {
200 200 uint32_t offset_high = getbe32(data);
201 201 offset_flags |= ((uint64_t)offset_high) << 32;
202 202 }
203 203
204 204 comp_len = getbe32(data + 8);
205 205 uncomp_len = getbe32(data + 12);
206 206 base_rev = getbe32(data + 16);
207 207 link_rev = getbe32(data + 20);
208 208 parent_1 = getbe32(data + 24);
209 209 parent_2 = getbe32(data + 28);
210 210 c_node_id = data + 32;
211 211
212 212 entry = Py_BuildValue(tuple_format, offset_flags, comp_len,
213 213 uncomp_len, base_rev, link_rev,
214 214 parent_1, parent_2, c_node_id, 20);
215 215
216 216 if (entry) {
217 217 PyObject_GC_UnTrack(entry);
218 218 Py_INCREF(entry);
219 219 }
220 220
221 221 self->cache[pos] = entry;
222 222
223 223 return entry;
224 224 }
225 225
226 226 /*
227 227 * Return the 20-byte SHA of the node corresponding to the given rev.
228 228 */
229 229 static const char *index_node(indexObject *self, Py_ssize_t pos)
230 230 {
231 231 Py_ssize_t length = index_length(self);
232 232 const char *data;
233 233
234 234 if (pos == length - 1 || pos == INT_MAX)
235 235 return nullid;
236 236
237 237 if (pos >= length)
238 238 return NULL;
239 239
240 240 if (pos >= self->length - 1) {
241 241 PyObject *tuple, *str;
242 242 tuple = PyList_GET_ITEM(self->added, pos - self->length + 1);
243 243 str = PyTuple_GetItem(tuple, 7);
244 244 return str ? PyBytes_AS_STRING(str) : NULL;
245 245 }
246 246
247 247 data = index_deref(self, pos);
248 248 return data ? data + 32 : NULL;
249 249 }
250 250
251 251 /*
252 252 * Return the 20-byte SHA of the node corresponding to the given rev. The
253 253 * rev is assumed to exist. If not, an exception is set.
254 254 */
255 255 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
256 256 {
257 257 const char *node = index_node(self, pos);
258 258 if (node == NULL) {
259 259 PyErr_Format(PyExc_IndexError, "could not access rev %d",
260 260 (int)pos);
261 261 }
262 262 return node;
263 263 }
264 264
265 265 static int nt_insert(indexObject *self, const char *node, int rev);
266 266
267 267 static int node_check(PyObject *obj, char **node, Py_ssize_t *nodelen)
268 268 {
269 269 if (PyBytes_AsStringAndSize(obj, node, nodelen) == -1)
270 270 return -1;
271 271 if (*nodelen == 20)
272 272 return 0;
273 273 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
274 274 return -1;
275 275 }
276 276
277 277 static PyObject *index_insert(indexObject *self, PyObject *args)
278 278 {
279 279 PyObject *obj;
280 280 char *node;
281 281 int index;
282 282 Py_ssize_t len, nodelen;
283 283
284 284 if (!PyArg_ParseTuple(args, "iO", &index, &obj))
285 285 return NULL;
286 286
287 287 if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
288 288 PyErr_SetString(PyExc_TypeError, "8-tuple required");
289 289 return NULL;
290 290 }
291 291
292 292 if (node_check(PyTuple_GET_ITEM(obj, 7), &node, &nodelen) == -1)
293 293 return NULL;
294 294
295 295 len = index_length(self);
296 296
297 297 if (index < 0)
298 298 index += len;
299 299
300 300 if (index != len - 1) {
301 301 PyErr_SetString(PyExc_IndexError,
302 302 "insert only supported at index -1");
303 303 return NULL;
304 304 }
305 305
306 306 if (self->added == NULL) {
307 307 self->added = PyList_New(0);
308 308 if (self->added == NULL)
309 309 return NULL;
310 310 }
311 311
312 312 if (PyList_Append(self->added, obj) == -1)
313 313 return NULL;
314 314
315 315 if (self->nt)
316 316 nt_insert(self, node, index);
317 317
318 318 Py_CLEAR(self->headrevs);
319 319 Py_RETURN_NONE;
320 320 }
321 321
322 322 static void _index_clearcaches(indexObject *self)
323 323 {
324 324 if (self->cache) {
325 325 Py_ssize_t i;
326 326
327 327 for (i = 0; i < self->raw_length; i++)
328 328 Py_CLEAR(self->cache[i]);
329 329 free(self->cache);
330 330 self->cache = NULL;
331 331 }
332 332 if (self->offsets) {
333 333 PyMem_Free(self->offsets);
334 334 self->offsets = NULL;
335 335 }
336 336 if (self->nt) {
337 337 free(self->nt);
338 338 self->nt = NULL;
339 339 }
340 340 Py_CLEAR(self->headrevs);
341 341 }
342 342
343 343 static PyObject *index_clearcaches(indexObject *self)
344 344 {
345 345 _index_clearcaches(self);
346 346 self->ntlength = self->ntcapacity = 0;
347 347 self->ntdepth = self->ntsplits = 0;
348 348 self->ntrev = -1;
349 349 self->ntlookups = self->ntmisses = 0;
350 350 Py_RETURN_NONE;
351 351 }
352 352
353 353 static PyObject *index_stats(indexObject *self)
354 354 {
355 355 PyObject *obj = PyDict_New();
356 356 PyObject *t = NULL;
357 357
358 358 if (obj == NULL)
359 359 return NULL;
360 360
361 361 #define istat(__n, __d) \
362 362 do { \
363 363 t = PyInt_FromSsize_t(self->__n); \
364 364 if (!t) \
365 365 goto bail; \
366 366 if (PyDict_SetItemString(obj, __d, t) == -1) \
367 367 goto bail; \
368 368 Py_DECREF(t); \
369 369 } while (0)
370 370
371 371 if (self->added) {
372 372 Py_ssize_t len = PyList_GET_SIZE(self->added);
373 373 t = PyInt_FromSsize_t(len);
374 374 if (!t)
375 375 goto bail;
376 376 if (PyDict_SetItemString(obj, "index entries added", t) == -1)
377 377 goto bail;
378 378 Py_DECREF(t);
379 379 }
380 380
381 381 if (self->raw_length != self->length - 1)
382 382 istat(raw_length, "revs on disk");
383 383 istat(length, "revs in memory");
384 384 istat(ntcapacity, "node trie capacity");
385 385 istat(ntdepth, "node trie depth");
386 386 istat(ntlength, "node trie count");
387 387 istat(ntlookups, "node trie lookups");
388 388 istat(ntmisses, "node trie misses");
389 389 istat(ntrev, "node trie last rev scanned");
390 390 istat(ntsplits, "node trie splits");
391 391
392 392 #undef istat
393 393
394 394 return obj;
395 395
396 396 bail:
397 397 Py_XDECREF(obj);
398 398 Py_XDECREF(t);
399 399 return NULL;
400 400 }
401 401
402 402 /*
403 403 * When we cache a list, we want to be sure the caller can't mutate
404 404 * the cached copy.
405 405 */
406 406 static PyObject *list_copy(PyObject *list)
407 407 {
408 408 Py_ssize_t len = PyList_GET_SIZE(list);
409 409 PyObject *newlist = PyList_New(len);
410 410 Py_ssize_t i;
411 411
412 412 if (newlist == NULL)
413 413 return NULL;
414 414
415 415 for (i = 0; i < len; i++) {
416 416 PyObject *obj = PyList_GET_ITEM(list, i);
417 417 Py_INCREF(obj);
418 418 PyList_SET_ITEM(newlist, i, obj);
419 419 }
420 420
421 421 return newlist;
422 422 }
423 423
424 424 static int check_filter(PyObject *filter, Py_ssize_t arg)
425 425 {
426 426 if (filter) {
427 427 PyObject *arglist, *result;
428 428 int isfiltered;
429 429
430 430 arglist = Py_BuildValue("(n)", arg);
431 431 if (!arglist) {
432 432 return -1;
433 433 }
434 434
435 435 result = PyEval_CallObject(filter, arglist);
436 436 Py_DECREF(arglist);
437 437 if (!result) {
438 438 return -1;
439 439 }
440 440
441 441 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
442 442 * same as this function, so we can just return it directly.*/
443 443 isfiltered = PyObject_IsTrue(result);
444 444 Py_DECREF(result);
445 445 return isfiltered;
446 446 } else {
447 447 return 0;
448 448 }
449 449 }
450 450
451 451 static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
452 452 Py_ssize_t marker, char *phases)
453 453 {
454 454 PyObject *iter = NULL;
455 455 PyObject *iter_item = NULL;
456 456 Py_ssize_t min_idx = index_length(self) + 1;
457 457 long iter_item_long;
458 458
459 459 if (PyList_GET_SIZE(list) != 0) {
460 460 iter = PyObject_GetIter(list);
461 461 if (iter == NULL)
462 462 return -2;
463 463 while ((iter_item = PyIter_Next(iter))) {
464 464 iter_item_long = PyInt_AS_LONG(iter_item);
465 465 Py_DECREF(iter_item);
466 466 if (iter_item_long < min_idx)
467 467 min_idx = iter_item_long;
468 468 phases[iter_item_long] = marker;
469 469 }
470 470 Py_DECREF(iter);
471 471 }
472 472
473 473 return min_idx;
474 474 }
475 475
476 476 static inline void set_phase_from_parents(char *phases, int parent_1,
477 477 int parent_2, Py_ssize_t i)
478 478 {
479 479 if (parent_1 >= 0 && phases[parent_1] > phases[i])
480 480 phases[i] = phases[parent_1];
481 481 if (parent_2 >= 0 && phases[parent_2] > phases[i])
482 482 phases[i] = phases[parent_2];
483 483 }
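/*
 * Illustrative example (not part of the upstream source): phases are
 * ordered public (0) < draft (1) < secret (2), so a rev is pulled up to
 * the highest phase among its parents; with parent_1 draft and parent_2
 * public, phases[i] becomes at least 1 (draft).
 */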
484 484
485 485 static PyObject *reachableroots2(indexObject *self, PyObject *args)
486 486 {
487 487
488 488 /* Input */
489 489 long minroot;
490 490 PyObject *includepatharg = NULL;
491 491 int includepath = 0;
492 492 /* heads and roots are lists */
493 493 PyObject *heads = NULL;
494 494 PyObject *roots = NULL;
495 495 PyObject *reachable = NULL;
496 496
497 497 PyObject *val;
498 498 Py_ssize_t len = index_length(self) - 1;
499 499 long revnum;
500 500 Py_ssize_t k;
501 501 Py_ssize_t i;
502 502 Py_ssize_t l;
503 503 int r;
504 504 int parents[2];
505 505
506 506 /* Internal data structure:
507 507 * tovisit: array of length len+1 (all revs + nullrev), filled up to lentovisit
508 508 * revstates: array of length len+1 (all revs + nullrev) */
509 509 int *tovisit = NULL;
510 510 long lentovisit = 0;
511 511 enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
512 512 char *revstates = NULL;
513 513
514 514 /* Get arguments */
515 515 if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
516 516 &PyList_Type, &roots,
517 517 &PyBool_Type, &includepatharg))
518 518 goto bail;
519 519
520 520 if (includepatharg == Py_True)
521 521 includepath = 1;
522 522
523 523 /* Initialize return set */
524 524 reachable = PyList_New(0);
525 525 if (reachable == NULL)
526 526 goto bail;
527 527
528 528 /* Initialize internal datastructures */
529 529 tovisit = (int *)malloc((len + 1) * sizeof(int));
530 530 if (tovisit == NULL) {
531 531 PyErr_NoMemory();
532 532 goto bail;
533 533 }
534 534
535 535 revstates = (char *)calloc(len + 1, 1);
536 536 if (revstates == NULL) {
537 537 PyErr_NoMemory();
538 538 goto bail;
539 539 }
540 540
541 541 l = PyList_GET_SIZE(roots);
542 542 for (i = 0; i < l; i++) {
543 543 revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
544 544 if (revnum == -1 && PyErr_Occurred())
545 545 goto bail;
546 546 /* If root is out of range, e.g. wdir(), it must be unreachable
547 547 * from heads. So we can just ignore it. */
548 548 if (revnum + 1 < 0 || revnum + 1 >= len + 1)
549 549 continue;
550 550 revstates[revnum + 1] |= RS_ROOT;
551 551 }
552 552
553 553 /* Populate tovisit with all the heads */
554 554 l = PyList_GET_SIZE(heads);
555 555 for (i = 0; i < l; i++) {
556 556 revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
557 557 if (revnum == -1 && PyErr_Occurred())
558 558 goto bail;
559 559 if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
560 560 PyErr_SetString(PyExc_IndexError, "head out of range");
561 561 goto bail;
562 562 }
563 563 if (!(revstates[revnum + 1] & RS_SEEN)) {
564 564 tovisit[lentovisit++] = (int)revnum;
565 565 revstates[revnum + 1] |= RS_SEEN;
566 566 }
567 567 }
568 568
569 569 /* Visit the tovisit list and find the reachable roots */
570 570 k = 0;
571 571 while (k < lentovisit) {
572 572 /* Add the node to reachable if it is a root*/
573 573 revnum = tovisit[k++];
574 574 if (revstates[revnum + 1] & RS_ROOT) {
575 575 revstates[revnum + 1] |= RS_REACHABLE;
576 576 val = PyInt_FromLong(revnum);
577 577 if (val == NULL)
578 578 goto bail;
579 579 r = PyList_Append(reachable, val);
580 580 Py_DECREF(val);
581 581 if (r < 0)
582 582 goto bail;
583 583 if (includepath == 0)
584 584 continue;
585 585 }
586 586
587 587 /* Add its parents to the list of nodes to visit */
588 588 if (revnum == -1)
589 589 continue;
590 590 r = index_get_parents(self, revnum, parents, (int)len - 1);
591 591 if (r < 0)
592 592 goto bail;
593 593 for (i = 0; i < 2; i++) {
594 594 if (!(revstates[parents[i] + 1] & RS_SEEN)
595 595 && parents[i] >= minroot) {
596 596 tovisit[lentovisit++] = parents[i];
597 597 revstates[parents[i] + 1] |= RS_SEEN;
598 598 }
599 599 }
600 600 }
601 601
602 602 /* Find all the nodes in between the roots we found and the heads
603 603 * and add them to the reachable set */
604 604 if (includepath == 1) {
605 605 long minidx = minroot;
606 606 if (minidx < 0)
607 607 minidx = 0;
608 608 for (i = minidx; i < len; i++) {
609 609 if (!(revstates[i + 1] & RS_SEEN))
610 610 continue;
611 611 r = index_get_parents(self, i, parents, (int)len - 1);
612 612 /* Corrupted index file, error is set from
613 613 * index_get_parents */
614 614 if (r < 0)
615 615 goto bail;
616 616 if (((revstates[parents[0] + 1] |
617 617 revstates[parents[1] + 1]) & RS_REACHABLE)
618 618 && !(revstates[i + 1] & RS_REACHABLE)) {
619 619 revstates[i + 1] |= RS_REACHABLE;
620 620 val = PyInt_FromLong(i);
621 621 if (val == NULL)
622 622 goto bail;
623 623 r = PyList_Append(reachable, val);
624 624 Py_DECREF(val);
625 625 if (r < 0)
626 626 goto bail;
627 627 }
628 628 }
629 629 }
630 630
631 631 free(revstates);
632 632 free(tovisit);
633 633 return reachable;
634 634 bail:
635 635 Py_XDECREF(reachable);
636 636 free(revstates);
637 637 free(tovisit);
638 638 return NULL;
639 639 }
640 640
641 641 static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
642 642 {
643 643 PyObject *roots = Py_None;
644 644 PyObject *ret = NULL;
645 645 PyObject *phasessize = NULL;
646 646 PyObject *phaseroots = NULL;
647 647 PyObject *phaseset = NULL;
648 648 PyObject *phasessetlist = NULL;
649 649 PyObject *rev = NULL;
650 650 Py_ssize_t len = index_length(self) - 1;
651 651 Py_ssize_t numphase = 0;
652 652 Py_ssize_t minrevallphases = 0;
653 653 Py_ssize_t minrevphase = 0;
654 654 Py_ssize_t i = 0;
655 655 char *phases = NULL;
656 656 long phase;
657 657
658 658 if (!PyArg_ParseTuple(args, "O", &roots))
659 659 goto done;
660 660 if (roots == NULL || !PyList_Check(roots)) {
661 661 PyErr_SetString(PyExc_TypeError, "roots must be a list");
662 662 goto done;
663 663 }
664 664
665 665 phases = calloc(len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
666 666 if (phases == NULL) {
667 667 PyErr_NoMemory();
668 668 goto done;
669 669 }
670 670 /* Put the phase information of all the roots in phases */
671 671 numphase = PyList_GET_SIZE(roots)+1;
672 672 minrevallphases = len + 1;
673 673 phasessetlist = PyList_New(numphase);
674 674 if (phasessetlist == NULL)
675 675 goto done;
676 676
677 677 PyList_SET_ITEM(phasessetlist, 0, Py_None);
678 678 Py_INCREF(Py_None);
679 679
680 680 for (i = 0; i < numphase-1; i++) {
681 681 phaseroots = PyList_GET_ITEM(roots, i);
682 682 phaseset = PySet_New(NULL);
683 683 if (phaseset == NULL)
684 684 goto release;
685 685 PyList_SET_ITEM(phasessetlist, i+1, phaseset);
686 686 if (!PyList_Check(phaseroots)) {
687 687 PyErr_SetString(PyExc_TypeError,
688 688 "roots item must be a list");
689 689 goto release;
690 690 }
691 691 minrevphase = add_roots_get_min(self, phaseroots, i+1, phases);
692 692 if (minrevphase == -2) /* Error from add_roots_get_min */
693 693 goto release;
694 694 minrevallphases = MIN(minrevallphases, minrevphase);
695 695 }
696 696 /* Propagate the phase information from the roots to the revs */
697 697 if (minrevallphases != -1) {
698 698 int parents[2];
699 699 for (i = minrevallphases; i < len; i++) {
700 700 if (index_get_parents(self, i, parents,
701 701 (int)len - 1) < 0)
702 702 goto release;
703 703 set_phase_from_parents(phases, parents[0], parents[1], i);
704 704 }
705 705 }
706 706 /* Transform phase list to a python list */
707 707 phasessize = PyInt_FromLong(len);
708 708 if (phasessize == NULL)
709 709 goto release;
710 710 for (i = 0; i < len; i++) {
711 711 phase = phases[i];
712 712 /* We only store the sets of revs for non-public phases; the public phase
713 713 * is computed as the difference */
714 714 if (phase != 0) {
715 715 phaseset = PyList_GET_ITEM(phasessetlist, phase);
716 716 rev = PyInt_FromLong(i);
717 717 if (rev == NULL)
718 718 goto release;
719 719 PySet_Add(phaseset, rev);
720 720 Py_XDECREF(rev);
721 721 }
722 722 }
723 723 ret = PyTuple_Pack(2, phasessize, phasessetlist);
724 724
725 725 release:
726 726 Py_XDECREF(phasessize);
727 727 Py_XDECREF(phasessetlist);
728 728 done:
729 729 free(phases);
730 730 return ret;
731 731 }
732 732
733 733 static PyObject *index_headrevs(indexObject *self, PyObject *args)
734 734 {
735 735 Py_ssize_t i, j, len;
736 736 char *nothead = NULL;
737 737 PyObject *heads = NULL;
738 738 PyObject *filter = NULL;
739 739 PyObject *filteredrevs = Py_None;
740 740
741 741 if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
742 742 return NULL;
743 743 }
744 744
745 745 if (self->headrevs && filteredrevs == self->filteredrevs)
746 746 return list_copy(self->headrevs);
747 747
748 748 Py_DECREF(self->filteredrevs);
749 749 self->filteredrevs = filteredrevs;
750 750 Py_INCREF(filteredrevs);
751 751
752 752 if (filteredrevs != Py_None) {
753 753 filter = PyObject_GetAttrString(filteredrevs, "__contains__");
754 754 if (!filter) {
755 755 PyErr_SetString(PyExc_TypeError,
756 756 "filteredrevs has no attribute __contains__");
757 757 goto bail;
758 758 }
759 759 }
760 760
761 761 len = index_length(self) - 1;
762 762 heads = PyList_New(0);
763 763 if (heads == NULL)
764 764 goto bail;
765 765 if (len == 0) {
766 766 PyObject *nullid = PyInt_FromLong(-1);
767 767 if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
768 768 Py_XDECREF(nullid);
769 769 goto bail;
770 770 }
771 771 goto done;
772 772 }
773 773
774 774 nothead = calloc(len, 1);
775 775 if (nothead == NULL) {
776 776 PyErr_NoMemory();
777 777 goto bail;
778 778 }
779 779
780 780 for (i = len - 1; i >= 0; i--) {
781 781 int isfiltered;
782 782 int parents[2];
783 783
784 784 /* If nothead[i] == 1, it means we've seen an unfiltered child of this
785 785 * node already, and therefore this node is not filtered. So we can skip
786 786 * the expensive check_filter step.
787 787 */
788 788 if (nothead[i] != 1) {
789 789 isfiltered = check_filter(filter, i);
790 790 if (isfiltered == -1) {
791 791 PyErr_SetString(PyExc_TypeError,
792 792 "unable to check filter");
793 793 goto bail;
794 794 }
795 795
796 796 if (isfiltered) {
797 797 nothead[i] = 1;
798 798 continue;
799 799 }
800 800 }
801 801
802 802 if (index_get_parents(self, i, parents, (int)len - 1) < 0)
803 803 goto bail;
804 804 for (j = 0; j < 2; j++) {
805 805 if (parents[j] >= 0)
806 806 nothead[parents[j]] = 1;
807 807 }
808 808 }
809 809
810 810 for (i = 0; i < len; i++) {
811 811 PyObject *head;
812 812
813 813 if (nothead[i])
814 814 continue;
815 815 head = PyInt_FromSsize_t(i);
816 816 if (head == NULL || PyList_Append(heads, head) == -1) {
817 817 Py_XDECREF(head);
818 818 goto bail;
819 819 }
820 820 }
821 821
822 822 done:
823 823 self->headrevs = heads;
824 824 Py_XDECREF(filter);
825 825 free(nothead);
826 826 return list_copy(self->headrevs);
827 827 bail:
828 828 Py_XDECREF(filter);
829 829 Py_XDECREF(heads);
830 830 free(nothead);
831 831 return NULL;
832 832 }
833 833
834 834 /**
835 835 * Obtain the base revision index entry.
836 836 *
837 837 * Callers must ensure that rev >= 0 or illegal memory access may occur.
838 838 */
839 839 static inline int index_baserev(indexObject *self, int rev)
840 840 {
841 841 const char *data;
842 842
843 843 if (rev >= self->length - 1) {
844 844 PyObject *tuple = PyList_GET_ITEM(self->added,
845 845 rev - self->length + 1);
846 846 return (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 3));
847 847 }
848 848 else {
849 849 data = index_deref(self, rev);
850 850 if (data == NULL) {
851 851 return -2;
852 852 }
853 853
854 854 return getbe32(data + 16);
855 855 }
856 856 }
857 857
858 858 static PyObject *index_deltachain(indexObject *self, PyObject *args)
859 859 {
860 860 int rev, generaldelta;
861 861 PyObject *stoparg;
862 862 int stoprev, iterrev, baserev = -1;
863 863 int stopped;
864 864 PyObject *chain = NULL, *result = NULL;
865 865 const Py_ssize_t length = index_length(self);
866 866
867 867 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
868 868 return NULL;
869 869 }
870 870
871 871 if (PyInt_Check(stoparg)) {
872 872 stoprev = (int)PyInt_AsLong(stoparg);
873 873 if (stoprev == -1 && PyErr_Occurred()) {
874 874 return NULL;
875 875 }
876 876 }
877 877 else if (stoparg == Py_None) {
878 878 stoprev = -2;
879 879 }
880 880 else {
881 881 PyErr_SetString(PyExc_ValueError,
882 882 "stoprev must be integer or None");
883 883 return NULL;
884 884 }
885 885
886 886 if (rev < 0 || rev >= length - 1) {
887 887 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
888 888 return NULL;
889 889 }
890 890
891 891 chain = PyList_New(0);
892 892 if (chain == NULL) {
893 893 return NULL;
894 894 }
895 895
896 896 baserev = index_baserev(self, rev);
897 897
898 898 /* This should never happen. */
899 899 if (baserev <= -2) {
900 900 /* Error should be set by index_deref() */
901 901 assert(PyErr_Occurred());
902 902 goto bail;
903 903 }
904 904
905 905 iterrev = rev;
906 906
907 907 while (iterrev != baserev && iterrev != stoprev) {
908 908 PyObject *value = PyInt_FromLong(iterrev);
909 909 if (value == NULL) {
910 910 goto bail;
911 911 }
912 912 if (PyList_Append(chain, value)) {
913 913 Py_DECREF(value);
914 914 goto bail;
915 915 }
916 916 Py_DECREF(value);
917 917
918 918 if (generaldelta) {
919 919 iterrev = baserev;
920 920 }
921 921 else {
922 922 iterrev--;
923 923 }
924 924
925 925 if (iterrev < 0) {
926 926 break;
927 927 }
928 928
929 929 if (iterrev >= length - 1) {
930 930 PyErr_SetString(PyExc_IndexError, "revision outside index");
931 931 return NULL;
932 932 }
933 933
934 934 baserev = index_baserev(self, iterrev);
935 935
936 936 /* This should never happen. */
937 937 if (baserev <= -2) {
938 938 /* Error should be set by index_deref() */
939 939 assert(PyErr_Occurred());
940 940 goto bail;
941 941 }
942 942 }
943 943
944 944 if (iterrev == stoprev) {
945 945 stopped = 1;
946 946 }
947 947 else {
948 948 PyObject *value = PyInt_FromLong(iterrev);
949 949 if (value == NULL) {
950 950 goto bail;
951 951 }
952 952 if (PyList_Append(chain, value)) {
953 953 Py_DECREF(value);
954 954 goto bail;
955 955 }
956 956 Py_DECREF(value);
957 957
958 958 stopped = 0;
959 959 }
960 960
961 961 if (PyList_Reverse(chain)) {
962 962 goto bail;
963 963 }
964 964
965 965 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
966 966 Py_DECREF(chain);
967 967 return result;
968 968
969 969 bail:
970 970 Py_DECREF(chain);
971 971 return NULL;
972 972 }
973 973
974 974 static inline int nt_level(const char *node, Py_ssize_t level)
975 975 {
976 976 int v = node[level>>1];
977 977 if (!(level & 1))
978 978 v >>= 4;
979 979 return v & 0xf;
980 980 }
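/*
 * Illustrative example (not part of the upstream source): nt_level()
 * extracts the level-th nybble of a binary node, so with node[0] == 0xab,
 * level 0 yields 0xa (high nybble) and level 1 yields 0xb (low nybble).
 */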
981 981
982 982 /*
983 983 * Return values:
984 984 *
985 985 * -4: match is ambiguous (multiple candidates)
986 986 * -2: not found
987 987 * rest: valid rev
988 988 */
989 989 static int nt_find(indexObject *self, const char *node, Py_ssize_t nodelen,
990 990 int hex)
991 991 {
992 992 int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
993 993 int level, maxlevel, off;
994 994
995 995 if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
996 996 return -1;
997 997
998 998 if (self->nt == NULL)
999 999 return -2;
1000 1000
1001 1001 if (hex)
1002 1002 maxlevel = nodelen > 40 ? 40 : (int)nodelen;
1003 1003 else
1004 1004 maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);
1005 1005
1006 1006 for (level = off = 0; level < maxlevel; level++) {
1007 1007 int k = getnybble(node, level);
1008 1008 nodetree *n = &self->nt[off];
1009 1009 int v = n->children[k];
1010 1010
1011 1011 if (v < 0) {
1012 1012 const char *n;
1013 1013 Py_ssize_t i;
1014 1014
1015 1015 v = -(v + 1);
1016 1016 n = index_node(self, v);
1017 1017 if (n == NULL)
1018 1018 return -2;
1019 1019 for (i = level; i < maxlevel; i++)
1020 1020 if (getnybble(node, i) != nt_level(n, i))
1021 1021 return -2;
1022 1022 return v;
1023 1023 }
1024 1024 if (v == 0)
1025 1025 return -2;
1026 1026 off = v;
1027 1027 }
1028 1028 /* multiple matches against an ambiguous prefix */
1029 1029 return -4;
1030 1030 }
1031 1031
1032 1032 static int nt_new(indexObject *self)
1033 1033 {
1034 1034 if (self->ntlength == self->ntcapacity) {
1035 1035 if (self->ntcapacity >= INT_MAX / (sizeof(nodetree) * 2)) {
1036 1036 PyErr_SetString(PyExc_MemoryError,
1037 1037 "overflow in nt_new");
1038 1038 return -1;
1039 1039 }
1040 1040 self->ntcapacity *= 2;
1041 1041 self->nt = realloc(self->nt,
1042 1042 self->ntcapacity * sizeof(nodetree));
1043 1043 if (self->nt == NULL) {
1044 1044 PyErr_SetString(PyExc_MemoryError, "out of memory");
1045 1045 return -1;
1046 1046 }
1047 1047 memset(&self->nt[self->ntlength], 0,
1048 1048 sizeof(nodetree) * (self->ntcapacity - self->ntlength));
1049 1049 }
1050 1050 return self->ntlength++;
1051 1051 }
1052 1052
1053 1053 static int nt_insert(indexObject *self, const char *node, int rev)
1054 1054 {
1055 1055 int level = 0;
1056 1056 int off = 0;
1057 1057
1058 1058 while (level < 40) {
1059 1059 int k = nt_level(node, level);
1060 1060 nodetree *n;
1061 1061 int v;
1062 1062
1063 1063 n = &self->nt[off];
1064 1064 v = n->children[k];
1065 1065
1066 1066 if (v == 0) {
1067 1067 n->children[k] = -rev - 1;
1068 1068 return 0;
1069 1069 }
1070 1070 if (v < 0) {
1071 1071 const char *oldnode = index_node(self, -(v + 1));
1072 1072 int noff;
1073 1073
1074 1074 if (!oldnode || !memcmp(oldnode, node, 20)) {
1075 1075 n->children[k] = -rev - 1;
1076 1076 return 0;
1077 1077 }
1078 1078 noff = nt_new(self);
1079 1079 if (noff == -1)
1080 1080 return -1;
1081 1081 /* self->nt may have been changed by realloc */
1082 1082 self->nt[off].children[k] = noff;
1083 1083 off = noff;
1084 1084 n = &self->nt[off];
1085 1085 n->children[nt_level(oldnode, ++level)] = v;
1086 1086 if (level > self->ntdepth)
1087 1087 self->ntdepth = level;
1088 1088 self->ntsplits += 1;
1089 1089 } else {
1090 1090 level += 1;
1091 1091 off = v;
1092 1092 }
1093 1093 }
1094 1094
1095 1095 return -1;
1096 1096 }
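/*
 * Illustrative note (not part of the upstream source): when a new node
 * collides with an existing leaf that shares a prefix, the loop above
 * allocates fresh interior nodes via nt_new(), pushing the old leaf one
 * level deeper each time, until the two nodes differ in some nybble;
 * ntdepth and ntsplits record how deep and how often this happens.
 */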
1097 1097
1098 1098 static int nt_init(indexObject *self)
1099 1099 {
1100 1100 if (self->nt == NULL) {
1101 1101 if ((size_t)self->raw_length > INT_MAX / sizeof(nodetree)) {
1102 1102 PyErr_SetString(PyExc_ValueError, "overflow in nt_init");
1103 1103 return -1;
1104 1104 }
1105 1105 self->ntcapacity = self->raw_length < 4
1106 1106 ? 4 : (int)self->raw_length / 2;
1107 1107
1108 1108 self->nt = calloc(self->ntcapacity, sizeof(nodetree));
1109 1109 if (self->nt == NULL) {
1110 1110 PyErr_NoMemory();
1111 1111 return -1;
1112 1112 }
1113 1113 self->ntlength = 1;
1114 1114 self->ntrev = (int)index_length(self) - 1;
1115 1115 self->ntlookups = 1;
1116 1116 self->ntmisses = 0;
1117 1117 if (nt_insert(self, nullid, INT_MAX) == -1)
1118 1118 return -1;
1119 1119 }
1120 1120 return 0;
1121 1121 }
1122 1122
1123 1123 /*
1124 1124 * Return values:
1125 1125 *
1126 1126 * -3: error (exception set)
1127 1127 * -2: not found (no exception set)
1128 1128 * rest: valid rev
1129 1129 */
1130 1130 static int index_find_node(indexObject *self,
1131 1131 const char *node, Py_ssize_t nodelen)
1132 1132 {
1133 1133 int rev;
1134 1134
1135 1135 self->ntlookups++;
1136 1136 rev = nt_find(self, node, nodelen, 0);
1137 1137 if (rev >= -1)
1138 1138 return rev;
1139 1139
1140 1140 if (nt_init(self) == -1)
1141 1141 return -3;
1142 1142
1143 1143 /*
1144 1144 * For the first handful of lookups, we scan the entire index,
1145 1145 * and cache only the matching nodes. This optimizes for cases
1146 1146 * like "hg tip", where only a few nodes are accessed.
1147 1147 *
1148 1148 * After that, we cache every node we visit, using a single
1149 1149 * scan amortized over multiple lookups. This gives the best
1150 1150 * bulk performance, e.g. for "hg log".
1151 1151 */
1152 1152 if (self->ntmisses++ < 4) {
1153 1153 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1154 1154 const char *n = index_node_existing(self, rev);
1155 1155 if (n == NULL)
1156 1156 return -3;
1157 1157 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1158 1158 if (nt_insert(self, n, rev) == -1)
1159 1159 return -3;
1160 1160 break;
1161 1161 }
1162 1162 }
1163 1163 } else {
1164 1164 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1165 1165 const char *n = index_node_existing(self, rev);
1166 1166 if (n == NULL)
1167 1167 return -3;
1168 1168 if (nt_insert(self, n, rev) == -1) {
1169 1169 self->ntrev = rev + 1;
1170 1170 return -3;
1171 1171 }
1172 1172 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1173 1173 break;
1174 1174 }
1175 1175 }
1176 1176 self->ntrev = rev;
1177 1177 }
1178 1178
1179 1179 if (rev >= 0)
1180 1180 return rev;
1181 1181 return -2;
1182 1182 }
1183 1183
1184 1184 static void raise_revlog_error(void)
1185 1185 {
1186 1186 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
1187 1187
1188 1188 mod = PyImport_ImportModule("mercurial.error");
1189 1189 if (mod == NULL) {
1190 1190 goto cleanup;
1191 1191 }
1192 1192
1193 1193 dict = PyModule_GetDict(mod);
1194 1194 if (dict == NULL) {
1195 1195 goto cleanup;
1196 1196 }
1197 1197 Py_INCREF(dict);
1198 1198
1199 1199 errclass = PyDict_GetItemString(dict, "RevlogError");
1200 1200 if (errclass == NULL) {
1201 1201 PyErr_SetString(PyExc_SystemError,
1202 1202 "could not find RevlogError");
1203 1203 goto cleanup;
1204 1204 }
1205 1205
1206 1206 /* value of exception is ignored by callers */
1207 1207 PyErr_SetString(errclass, "RevlogError");
1208 1208
1209 1209 cleanup:
1210 1210 Py_XDECREF(dict);
1211 1211 Py_XDECREF(mod);
1212 1212 }
1213 1213
1214 1214 static PyObject *index_getitem(indexObject *self, PyObject *value)
1215 1215 {
1216 1216 char *node;
1217 1217 Py_ssize_t nodelen;
1218 1218 int rev;
1219 1219
1220 1220 if (PyInt_Check(value))
1221 1221 return index_get(self, PyInt_AS_LONG(value));
1222 1222
1223 1223 if (node_check(value, &node, &nodelen) == -1)
1224 1224 return NULL;
1225 1225 rev = index_find_node(self, node, nodelen);
1226 1226 if (rev >= -1)
1227 1227 return PyInt_FromLong(rev);
1228 1228 if (rev == -2)
1229 1229 raise_revlog_error();
1230 1230 return NULL;
1231 1231 }
1232 1232
1233 1233 /*
1234 1234 * Fully populate the radix tree.
1235 1235 */
1236 1236 static int nt_populate(indexObject *self) {
1237 1237 int rev;
1238 1238 if (self->ntrev > 0) {
1239 1239 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1240 1240 const char *n = index_node_existing(self, rev);
1241 1241 if (n == NULL)
1242 1242 return -1;
1243 1243 if (nt_insert(self, n, rev) == -1)
1244 1244 return -1;
1245 1245 }
1246 self->ntrev = rev;
1246 self->ntrev = -1;
1247 1247 }
1248 1248 return 0;
1249 1249 }
1250 1250
1251 1251 static int nt_partialmatch(indexObject *self, const char *node,
1252 1252 Py_ssize_t nodelen)
1253 1253 {
1254 1254 if (nt_init(self) == -1)
1255 1255 return -3;
1256 1256 if (nt_populate(self) == -1)
1257 1257 return -3;
1258 1258
1259 1259 return nt_find(self, node, nodelen, 1);
1260 1260 }
1261 1261
1262 1262 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
1263 1263 {
1264 1264 const char *fullnode;
1265 1265 int nodelen;
1266 1266 char *node;
1267 1267 int rev, i;
1268 1268
1269 1269 if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
1270 1270 return NULL;
1271 1271
1272 1272 if (nodelen < 1) {
1273 1273 PyErr_SetString(PyExc_ValueError, "key too short");
1274 1274 return NULL;
1275 1275 }
1276 1276
1277 1277 if (nodelen > 40) {
1278 1278 PyErr_SetString(PyExc_ValueError, "key too long");
1279 1279 return NULL;
1280 1280 }
1281 1281
1282 1282 for (i = 0; i < nodelen; i++)
1283 1283 hexdigit(node, i);
1284 1284 if (PyErr_Occurred()) {
1285 1285 /* input contains non-hex characters */
1286 1286 PyErr_Clear();
1287 1287 Py_RETURN_NONE;
1288 1288 }
1289 1289
1290 1290 rev = nt_partialmatch(self, node, nodelen);
1291 1291
1292 1292 switch (rev) {
1293 1293 case -4:
1294 1294 raise_revlog_error();
1295 1295 case -3:
1296 1296 return NULL;
1297 1297 case -2:
1298 1298 Py_RETURN_NONE;
1299 1299 case -1:
1300 1300 return PyBytes_FromStringAndSize(nullid, 20);
1301 1301 }
1302 1302
1303 1303 fullnode = index_node_existing(self, rev);
1304 1304 if (fullnode == NULL) {
1305 1305 return NULL;
1306 1306 }
1307 1307 return PyBytes_FromStringAndSize(fullnode, 20);
1308 1308 }
1309 1309
1310 1310 static PyObject *index_m_get(indexObject *self, PyObject *args)
1311 1311 {
1312 1312 Py_ssize_t nodelen;
1313 1313 PyObject *val;
1314 1314 char *node;
1315 1315 int rev;
1316 1316
1317 1317 if (!PyArg_ParseTuple(args, "O", &val))
1318 1318 return NULL;
1319 1319 if (node_check(val, &node, &nodelen) == -1)
1320 1320 return NULL;
1321 1321 rev = index_find_node(self, node, nodelen);
1322 1322 if (rev == -3)
1323 1323 return NULL;
1324 1324 if (rev == -2)
1325 1325 Py_RETURN_NONE;
1326 1326 return PyInt_FromLong(rev);
1327 1327 }
1328 1328
1329 1329 static int index_contains(indexObject *self, PyObject *value)
1330 1330 {
1331 1331 char *node;
1332 1332 Py_ssize_t nodelen;
1333 1333
1334 1334 if (PyInt_Check(value)) {
1335 1335 long rev = PyInt_AS_LONG(value);
1336 1336 return rev >= -1 && rev < index_length(self);
1337 1337 }
1338 1338
1339 1339 if (node_check(value, &node, &nodelen) == -1)
1340 1340 return -1;
1341 1341
1342 1342 switch (index_find_node(self, node, nodelen)) {
1343 1343 case -3:
1344 1344 return -1;
1345 1345 case -2:
1346 1346 return 0;
1347 1347 default:
1348 1348 return 1;
1349 1349 }
1350 1350 }
1351 1351
1352 1352 typedef uint64_t bitmask;
1353 1353
1354 1354 /*
1355 1355 * Given a disjoint set of revs, return all candidates for the
1356 1356 * greatest common ancestor. In revset notation, this is the set
1357 1357 * "heads(::a and ::b and ...)"
1358 1358 */
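/*
 * Illustrative example of the bitmask bookkeeping below (not part of the
 * upstream source): with revcount == 3, allseen is 0b111 and poison is
 * 0b1000; seen[v] records which of the three input revs can reach v, a
 * rev whose mask reaches allseen is appended to the result, and poisoning
 * that mask keeps the candidate's own ancestors from being reported too.
 */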
1359 1359 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
1360 1360 int revcount)
1361 1361 {
1362 1362 const bitmask allseen = (1ull << revcount) - 1;
1363 1363 const bitmask poison = 1ull << revcount;
1364 1364 PyObject *gca = PyList_New(0);
1365 1365 int i, v, interesting;
1366 1366 int maxrev = -1;
1367 1367 bitmask sp;
1368 1368 bitmask *seen;
1369 1369
1370 1370 if (gca == NULL)
1371 1371 return PyErr_NoMemory();
1372 1372
1373 1373 for (i = 0; i < revcount; i++) {
1374 1374 if (revs[i] > maxrev)
1375 1375 maxrev = revs[i];
1376 1376 }
1377 1377
1378 1378 seen = calloc(sizeof(*seen), maxrev + 1);
1379 1379 if (seen == NULL) {
1380 1380 Py_DECREF(gca);
1381 1381 return PyErr_NoMemory();
1382 1382 }
1383 1383
1384 1384 for (i = 0; i < revcount; i++)
1385 1385 seen[revs[i]] = 1ull << i;
1386 1386
1387 1387 interesting = revcount;
1388 1388
1389 1389 for (v = maxrev; v >= 0 && interesting; v--) {
1390 1390 bitmask sv = seen[v];
1391 1391 int parents[2];
1392 1392
1393 1393 if (!sv)
1394 1394 continue;
1395 1395
1396 1396 if (sv < poison) {
1397 1397 interesting -= 1;
1398 1398 if (sv == allseen) {
1399 1399 PyObject *obj = PyInt_FromLong(v);
1400 1400 if (obj == NULL)
1401 1401 goto bail;
1402 1402 if (PyList_Append(gca, obj) == -1) {
1403 1403 Py_DECREF(obj);
1404 1404 goto bail;
1405 1405 }
1406 1406 sv |= poison;
1407 1407 for (i = 0; i < revcount; i++) {
1408 1408 if (revs[i] == v)
1409 1409 goto done;
1410 1410 }
1411 1411 }
1412 1412 }
1413 1413 if (index_get_parents(self, v, parents, maxrev) < 0)
1414 1414 goto bail;
1415 1415
1416 1416 for (i = 0; i < 2; i++) {
1417 1417 int p = parents[i];
1418 1418 if (p == -1)
1419 1419 continue;
1420 1420 sp = seen[p];
1421 1421 if (sv < poison) {
1422 1422 if (sp == 0) {
1423 1423 seen[p] = sv;
1424 1424 interesting++;
1425 1425 }
1426 1426 else if (sp != sv)
1427 1427 seen[p] |= sv;
1428 1428 } else {
1429 1429 if (sp && sp < poison)
1430 1430 interesting--;
1431 1431 seen[p] = sv;
1432 1432 }
1433 1433 }
1434 1434 }
1435 1435
1436 1436 done:
1437 1437 free(seen);
1438 1438 return gca;
1439 1439 bail:
1440 1440 free(seen);
1441 1441 Py_XDECREF(gca);
1442 1442 return NULL;
1443 1443 }
1444 1444
1445 1445 /*
1446 1446 * Given a disjoint set of revs, return the subset with the longest
1447 1447 * path to the root.
1448 1448 */
1449 1449 static PyObject *find_deepest(indexObject *self, PyObject *revs)
1450 1450 {
1451 1451 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
1452 1452 static const Py_ssize_t capacity = 24;
1453 1453 int *depth, *interesting = NULL;
1454 1454 int i, j, v, ninteresting;
1455 1455 PyObject *dict = NULL, *keys = NULL;
1456 1456 long *seen = NULL;
1457 1457 int maxrev = -1;
1458 1458 long final;
1459 1459
1460 1460 if (revcount > capacity) {
1461 1461 PyErr_Format(PyExc_OverflowError,
1462 1462 "bitset size (%ld) > capacity (%ld)",
1463 1463 (long)revcount, (long)capacity);
1464 1464 return NULL;
1465 1465 }
1466 1466
1467 1467 for (i = 0; i < revcount; i++) {
1468 1468 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
1469 1469 if (n > maxrev)
1470 1470 maxrev = n;
1471 1471 }
1472 1472
1473 1473 depth = calloc(sizeof(*depth), maxrev + 1);
1474 1474 if (depth == NULL)
1475 1475 return PyErr_NoMemory();
1476 1476
1477 1477 seen = calloc(sizeof(*seen), maxrev + 1);
1478 1478 if (seen == NULL) {
1479 1479 PyErr_NoMemory();
1480 1480 goto bail;
1481 1481 }
1482 1482
1483 1483 interesting = calloc(sizeof(*interesting), 1 << revcount);
1484 1484 if (interesting == NULL) {
1485 1485 PyErr_NoMemory();
1486 1486 goto bail;
1487 1487 }
1488 1488
1489 1489 if (PyList_Sort(revs) == -1)
1490 1490 goto bail;
1491 1491
1492 1492 for (i = 0; i < revcount; i++) {
1493 1493 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
1494 1494 long b = 1l << i;
1495 1495 depth[n] = 1;
1496 1496 seen[n] = b;
1497 1497 interesting[b] = 1;
1498 1498 }
1499 1499
1500 1500 /* invariant: ninteresting is the number of non-zero entries in
1501 1501 * interesting. */
1502 1502 ninteresting = (int)revcount;
1503 1503
1504 1504 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
1505 1505 int dv = depth[v];
1506 1506 int parents[2];
1507 1507 long sv;
1508 1508
1509 1509 if (dv == 0)
1510 1510 continue;
1511 1511
1512 1512 sv = seen[v];
1513 1513 if (index_get_parents(self, v, parents, maxrev) < 0)
1514 1514 goto bail;
1515 1515
1516 1516 for (i = 0; i < 2; i++) {
1517 1517 int p = parents[i];
1518 1518 long sp;
1519 1519 int dp;
1520 1520
1521 1521 if (p == -1)
1522 1522 continue;
1523 1523
1524 1524 dp = depth[p];
1525 1525 sp = seen[p];
1526 1526 if (dp <= dv) {
1527 1527 depth[p] = dv + 1;
1528 1528 if (sp != sv) {
1529 1529 interesting[sv] += 1;
1530 1530 seen[p] = sv;
1531 1531 if (sp) {
1532 1532 interesting[sp] -= 1;
1533 1533 if (interesting[sp] == 0)
1534 1534 ninteresting -= 1;
1535 1535 }
1536 1536 }
1537 1537 }
1538 1538 else if (dv == dp - 1) {
1539 1539 long nsp = sp | sv;
1540 1540 if (nsp == sp)
1541 1541 continue;
1542 1542 seen[p] = nsp;
1543 1543 interesting[sp] -= 1;
1544 1544 if (interesting[sp] == 0)
1545 1545 ninteresting -= 1;
1546 1546 if (interesting[nsp] == 0)
1547 1547 ninteresting += 1;
1548 1548 interesting[nsp] += 1;
1549 1549 }
1550 1550 }
1551 1551 interesting[sv] -= 1;
1552 1552 if (interesting[sv] == 0)
1553 1553 ninteresting -= 1;
1554 1554 }
1555 1555
1556 1556 final = 0;
1557 1557 j = ninteresting;
1558 1558 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
1559 1559 if (interesting[i] == 0)
1560 1560 continue;
1561 1561 final |= i;
1562 1562 j -= 1;
1563 1563 }
1564 1564 if (final == 0) {
1565 1565 keys = PyList_New(0);
1566 1566 goto bail;
1567 1567 }
1568 1568
1569 1569 dict = PyDict_New();
1570 1570 if (dict == NULL)
1571 1571 goto bail;
1572 1572
1573 1573 for (i = 0; i < revcount; i++) {
1574 1574 PyObject *key;
1575 1575
1576 1576 if ((final & (1 << i)) == 0)
1577 1577 continue;
1578 1578
1579 1579 key = PyList_GET_ITEM(revs, i);
1580 1580 Py_INCREF(key);
1581 1581 Py_INCREF(Py_None);
1582 1582 if (PyDict_SetItem(dict, key, Py_None) == -1) {
1583 1583 Py_DECREF(key);
1584 1584 Py_DECREF(Py_None);
1585 1585 goto bail;
1586 1586 }
1587 1587 }
1588 1588
1589 1589 keys = PyDict_Keys(dict);
1590 1590
1591 1591 bail:
1592 1592 free(depth);
1593 1593 free(seen);
1594 1594 free(interesting);
1595 1595 Py_XDECREF(dict);
1596 1596
1597 1597 return keys;
1598 1598 }
1599 1599
1600 1600 /*
1601 1601 * Given a (possibly overlapping) set of revs, return all the
1602 1602 * common ancestors heads: heads(::args[0] and ::args[1] and ...)
1603 1603 */
1604 1604 static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
1605 1605 {
1606 1606 PyObject *ret = NULL;
1607 1607 Py_ssize_t argcount, i, len;
1608 1608 bitmask repeat = 0;
1609 1609 int revcount = 0;
1610 1610 int *revs;
1611 1611
1612 1612 argcount = PySequence_Length(args);
1613 1613 revs = PyMem_Malloc(argcount * sizeof(*revs));
1614 1614 if (argcount > 0 && revs == NULL)
1615 1615 return PyErr_NoMemory();
1616 1616 len = index_length(self) - 1;
1617 1617
1618 1618 for (i = 0; i < argcount; i++) {
1619 1619 static const int capacity = 24;
1620 1620 PyObject *obj = PySequence_GetItem(args, i);
1621 1621 bitmask x;
1622 1622 long val;
1623 1623
1624 1624 if (!PyInt_Check(obj)) {
1625 1625 PyErr_SetString(PyExc_TypeError,
1626 1626 "arguments must all be ints");
1627 1627 Py_DECREF(obj);
1628 1628 goto bail;
1629 1629 }
1630 1630 val = PyInt_AsLong(obj);
1631 1631 Py_DECREF(obj);
1632 1632 if (val == -1) {
1633 1633 ret = PyList_New(0);
1634 1634 goto done;
1635 1635 }
1636 1636 if (val < 0 || val >= len) {
1637 1637 PyErr_SetString(PyExc_IndexError,
1638 1638 "index out of range");
1639 1639 goto bail;
1640 1640 }
1641 1641 /* this cheesy bloom filter lets us avoid some more
1642 1642 * expensive duplicate checks in the common set-is-disjoint
1643 1643 * case */
1644 1644 x = 1ull << (val & 0x3f);
1645 1645 if (repeat & x) {
1646 1646 int k;
1647 1647 for (k = 0; k < revcount; k++) {
1648 1648 if (val == revs[k])
1649 1649 goto duplicate;
1650 1650 }
1651 1651 }
1652 1652 else repeat |= x;
1653 1653 if (revcount >= capacity) {
1654 1654 PyErr_Format(PyExc_OverflowError,
1655 1655 "bitset size (%d) > capacity (%d)",
1656 1656 revcount, capacity);
1657 1657 goto bail;
1658 1658 }
1659 1659 revs[revcount++] = (int)val;
1660 1660 duplicate:;
1661 1661 }
1662 1662
1663 1663 if (revcount == 0) {
1664 1664 ret = PyList_New(0);
1665 1665 goto done;
1666 1666 }
1667 1667 if (revcount == 1) {
1668 1668 PyObject *obj;
1669 1669 ret = PyList_New(1);
1670 1670 if (ret == NULL)
1671 1671 goto bail;
1672 1672 obj = PyInt_FromLong(revs[0]);
1673 1673 if (obj == NULL)
1674 1674 goto bail;
1675 1675 PyList_SET_ITEM(ret, 0, obj);
1676 1676 goto done;
1677 1677 }
1678 1678
1679 1679 ret = find_gca_candidates(self, revs, revcount);
1680 1680 if (ret == NULL)
1681 1681 goto bail;
1682 1682
1683 1683 done:
1684 1684 PyMem_Free(revs);
1685 1685 return ret;
1686 1686
1687 1687 bail:
1688 1688 PyMem_Free(revs);
1689 1689 Py_XDECREF(ret);
1690 1690 return NULL;
1691 1691 }
1692 1692
1693 1693 /*
1694 1694 * Given a (possibly overlapping) set of revs, return the greatest
1695 1695 * common ancestors: those with the longest path to the root.
1696 1696 */
1697 1697 static PyObject *index_ancestors(indexObject *self, PyObject *args)
1698 1698 {
1699 1699 PyObject *ret;
1700 1700 PyObject *gca = index_commonancestorsheads(self, args);
1701 1701 if (gca == NULL)
1702 1702 return NULL;
1703 1703
1704 1704 if (PyList_GET_SIZE(gca) <= 1) {
1705 1705 return gca;
1706 1706 }
1707 1707
1708 1708 ret = find_deepest(self, gca);
1709 1709 Py_DECREF(gca);
1710 1710 return ret;
1711 1711 }
1712 1712
1713 1713 /*
1714 1714 * Invalidate any trie entries introduced by added revs.
1715 1715 */
1716 1716 static void nt_invalidate_added(indexObject *self, Py_ssize_t start)
1717 1717 {
1718 1718 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
1719 1719
1720 1720 for (i = start; i < len; i++) {
1721 1721 PyObject *tuple = PyList_GET_ITEM(self->added, i);
1722 1722 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
1723 1723
1724 1724 nt_insert(self, PyBytes_AS_STRING(node), -1);
1725 1725 }
1726 1726
1727 1727 if (start == 0)
1728 1728 Py_CLEAR(self->added);
1729 1729 }
1730 1730
1731 1731 /*
1732 1732 * Delete a numeric range of revs, which must be at the end of the
1733 1733 * index, but exclude the sentinel nullid entry.
1734 1734 */
1735 1735 static int index_slice_del(indexObject *self, PyObject *item)
1736 1736 {
1737 1737 Py_ssize_t start, stop, step, slicelength;
1738 1738 Py_ssize_t length = index_length(self);
1739 1739 int ret = 0;
1740 1740
1741 1741 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
1742 1742 #ifdef IS_PY3K
1743 1743 if (PySlice_GetIndicesEx(item, length,
1744 1744 #else
1745 1745 if (PySlice_GetIndicesEx((PySliceObject*)item, length,
1746 1746 #endif
1747 1747 &start, &stop, &step, &slicelength) < 0)
1748 1748 return -1;
1749 1749
1750 1750 if (slicelength <= 0)
1751 1751 return 0;
1752 1752
1753 1753 if ((step < 0 && start < stop) || (step > 0 && start > stop))
1754 1754 stop = start;
1755 1755
1756 1756 if (step < 0) {
1757 1757 stop = start + 1;
1758 1758 start = stop + step*(slicelength - 1) - 1;
1759 1759 step = -step;
1760 1760 }
1761 1761
1762 1762 if (step != 1) {
1763 1763 PyErr_SetString(PyExc_ValueError,
1764 1764 "revlog index delete requires step size of 1");
1765 1765 return -1;
1766 1766 }
1767 1767
1768 1768 if (stop != length - 1) {
1769 1769 PyErr_SetString(PyExc_IndexError,
1770 1770 "revlog index deletion indices are invalid");
1771 1771 return -1;
1772 1772 }
1773 1773
1774 1774 if (start < self->length - 1) {
1775 1775 if (self->nt) {
1776 1776 Py_ssize_t i;
1777 1777
1778 1778 for (i = start + 1; i < self->length - 1; i++) {
1779 1779 const char *node = index_node(self, i);
1780 1780
1781 1781 if (node)
1782 1782 nt_insert(self, node, -1);
1783 1783 }
1784 1784 if (self->added)
1785 1785 nt_invalidate_added(self, 0);
1786 1786 if (self->ntrev > start)
1787 1787 self->ntrev = (int)start;
1788 1788 }
1789 1789 self->length = start + 1;
1790 1790 if (start < self->raw_length) {
1791 1791 if (self->cache) {
1792 1792 Py_ssize_t i;
1793 1793 for (i = start; i < self->raw_length; i++)
1794 1794 Py_CLEAR(self->cache[i]);
1795 1795 }
1796 1796 self->raw_length = start;
1797 1797 }
1798 1798 goto done;
1799 1799 }
1800 1800
1801 1801 if (self->nt) {
1802 1802 nt_invalidate_added(self, start - self->length + 1);
1803 1803 if (self->ntrev > start)
1804 1804 self->ntrev = (int)start;
1805 1805 }
1806 1806 if (self->added)
1807 1807 ret = PyList_SetSlice(self->added, start - self->length + 1,
1808 1808 PyList_GET_SIZE(self->added), NULL);
1809 1809 done:
1810 1810 Py_CLEAR(self->headrevs);
1811 1811 return ret;
1812 1812 }
1813 1813
1814 1814 /*
1815 1815 * Supported ops:
1816 1816 *
1817 1817 * slice deletion
1818 1818 * string assignment (extend node->rev mapping)
1819 1819 * string deletion (shrink node->rev mapping)
1820 1820 */
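/*
 * Illustrative Python-level view of the three operations listed above
 * (assumed usage, not part of the upstream source):
 *
 *     del index[5:]        # slice deletion: truncate to the first 5 revs
 *     index[node] = rev    # string assignment: extend node->rev mapping
 *     del index[node]      # string deletion: forget a node->rev entry
 */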
1821 1821 static int index_assign_subscript(indexObject *self, PyObject *item,
1822 1822 PyObject *value)
1823 1823 {
1824 1824 char *node;
1825 1825 Py_ssize_t nodelen;
1826 1826 long rev;
1827 1827
1828 1828 if (PySlice_Check(item) && value == NULL)
1829 1829 return index_slice_del(self, item);
1830 1830
1831 1831 if (node_check(item, &node, &nodelen) == -1)
1832 1832 return -1;
1833 1833
1834 1834 if (value == NULL)
1835 1835 return self->nt ? nt_insert(self, node, -1) : 0;
1836 1836 rev = PyInt_AsLong(value);
1837 1837 if (rev > INT_MAX || rev < 0) {
1838 1838 if (!PyErr_Occurred())
1839 1839 PyErr_SetString(PyExc_ValueError, "rev out of range");
1840 1840 return -1;
1841 1841 }
1842 1842
1843 1843 if (nt_init(self) == -1)
1844 1844 return -1;
1845 1845 return nt_insert(self, node, (int)rev);
1846 1846 }
1847 1847
1848 1848 /*
1849 1849 * Find all RevlogNG entries in an index that has inline data. Update
1850 1850 * the optional "offsets" table with those entries.
1851 1851 */
1852 1852 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
1853 1853 {
1854 1854 const char *data = (const char *)self->buf.buf;
1855 1855 Py_ssize_t pos = 0;
1856 1856 Py_ssize_t end = self->buf.len;
1857 1857 long incr = v1_hdrsize;
1858 1858 Py_ssize_t len = 0;
1859 1859
1860 1860 while (pos + v1_hdrsize <= end && pos >= 0) {
1861 1861 uint32_t comp_len;
1862 1862 /* 3rd element of header is length of compressed inline data */
1863 1863 comp_len = getbe32(data + pos + 8);
1864 1864 incr = v1_hdrsize + comp_len;
1865 1865 if (offsets)
1866 1866 offsets[len] = data + pos;
1867 1867 len++;
1868 1868 pos += incr;
1869 1869 }
1870 1870
1871 1871 if (pos != end) {
1872 1872 if (!PyErr_Occurred())
1873 1873 PyErr_SetString(PyExc_ValueError, "corrupt index file");
1874 1874 return -1;
1875 1875 }
1876 1876
1877 1877 return len;
1878 1878 }
1879 1879
1880 1880 static int index_init(indexObject *self, PyObject *args)
1881 1881 {
1882 1882 PyObject *data_obj, *inlined_obj;
1883 1883 Py_ssize_t size;
1884 1884
1885 1885 /* Initialize before argument-checking to avoid index_dealloc() crash. */
1886 1886 self->raw_length = 0;
1887 1887 self->added = NULL;
1888 1888 self->cache = NULL;
1889 1889 self->data = NULL;
1890 1890 memset(&self->buf, 0, sizeof(self->buf));
1891 1891 self->headrevs = NULL;
1892 1892 self->filteredrevs = Py_None;
1893 1893 Py_INCREF(Py_None);
1894 1894 self->nt = NULL;
1895 1895 self->offsets = NULL;
1896 1896
1897 1897 if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
1898 1898 return -1;
1899 1899 if (!PyObject_CheckBuffer(data_obj)) {
1900 1900 PyErr_SetString(PyExc_TypeError,
1901 1901 "data does not support buffer interface");
1902 1902 return -1;
1903 1903 }
1904 1904
1905 1905 if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
1906 1906 return -1;
1907 1907 size = self->buf.len;
1908 1908
1909 1909 self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
1910 1910 self->data = data_obj;
1911 1911
1912 1912 self->ntlength = self->ntcapacity = 0;
1913 1913 self->ntdepth = self->ntsplits = 0;
1914 1914 self->ntlookups = self->ntmisses = 0;
1915 1915 self->ntrev = -1;
1916 1916 Py_INCREF(self->data);
1917 1917
1918 1918 if (self->inlined) {
1919 1919 Py_ssize_t len = inline_scan(self, NULL);
1920 1920 if (len == -1)
1921 1921 goto bail;
1922 1922 self->raw_length = len;
1923 1923 self->length = len + 1;
1924 1924 } else {
1925 1925 if (size % v1_hdrsize) {
1926 1926 PyErr_SetString(PyExc_ValueError, "corrupt index file");
1927 1927 goto bail;
1928 1928 }
1929 1929 self->raw_length = size / v1_hdrsize;
1930 1930 self->length = self->raw_length + 1;
1931 1931 }
1932 1932
1933 1933 return 0;
1934 1934 bail:
1935 1935 return -1;
1936 1936 }
1937 1937
1938 1938 static PyObject *index_nodemap(indexObject *self)
1939 1939 {
1940 1940 Py_INCREF(self);
1941 1941 return (PyObject *)self;
1942 1942 }
1943 1943
1944 1944 static void index_dealloc(indexObject *self)
1945 1945 {
1946 1946 _index_clearcaches(self);
1947 1947 Py_XDECREF(self->filteredrevs);
1948 1948 if (self->buf.buf) {
1949 1949 PyBuffer_Release(&self->buf);
1950 1950 memset(&self->buf, 0, sizeof(self->buf));
1951 1951 }
1952 1952 Py_XDECREF(self->data);
1953 1953 Py_XDECREF(self->added);
1954 1954 PyObject_Del(self);
1955 1955 }
1956 1956
1957 1957 static PySequenceMethods index_sequence_methods = {
1958 1958 (lenfunc)index_length, /* sq_length */
1959 1959 0, /* sq_concat */
1960 1960 0, /* sq_repeat */
1961 1961 (ssizeargfunc)index_get, /* sq_item */
1962 1962 0, /* sq_slice */
1963 1963 0, /* sq_ass_item */
1964 1964 0, /* sq_ass_slice */
1965 1965 (objobjproc)index_contains, /* sq_contains */
1966 1966 };
1967 1967
1968 1968 static PyMappingMethods index_mapping_methods = {
1969 1969 (lenfunc)index_length, /* mp_length */
1970 1970 (binaryfunc)index_getitem, /* mp_subscript */
1971 1971 (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
1972 1972 };
1973 1973
1974 1974 static PyMethodDef index_methods[] = {
1975 1975 {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
1976 1976 "return the gca set of the given revs"},
1977 1977 {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
1978 1978 METH_VARARGS,
1979 1979 "return the heads of the common ancestors of the given revs"},
1980 1980 {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
1981 1981 "clear the index caches"},
1982 1982 {"get", (PyCFunction)index_m_get, METH_VARARGS,
1983 1983 "get an index entry"},
1984 1984 {"computephasesmapsets", (PyCFunction)compute_phases_map_sets,
1985 1985 METH_VARARGS, "compute phases"},
1986 1986 {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
1987 1987 "reachableroots"},
1988 1988 {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
1989 1989 "get head revisions"}, /* Can do filtering since 3.2 */
1990 1990 {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
1991 1991 "get filtered head revisions"}, /* Can always do filtering */
1992 1992 {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
1993 1993 "determine revisions with deltas to reconstruct fulltext"},
1994 1994 {"insert", (PyCFunction)index_insert, METH_VARARGS,
1995 1995 "insert an index entry"},
1996 1996 {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
1997 1997 "match a potentially ambiguous node ID"},
1998 1998 {"stats", (PyCFunction)index_stats, METH_NOARGS,
1999 1999 "stats for the index"},
2000 2000 {NULL} /* Sentinel */
2001 2001 };
2002 2002
2003 2003 static PyGetSetDef index_getset[] = {
2004 2004 {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
2005 2005 {NULL} /* Sentinel */
2006 2006 };
2007 2007
2008 2008 static PyTypeObject indexType = {
2009 2009 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2010 2010 "parsers.index", /* tp_name */
2011 2011 sizeof(indexObject), /* tp_basicsize */
2012 2012 0, /* tp_itemsize */
2013 2013 (destructor)index_dealloc, /* tp_dealloc */
2014 2014 0, /* tp_print */
2015 2015 0, /* tp_getattr */
2016 2016 0, /* tp_setattr */
2017 2017 0, /* tp_compare */
2018 2018 0, /* tp_repr */
2019 2019 0, /* tp_as_number */
2020 2020 &index_sequence_methods, /* tp_as_sequence */
2021 2021 &index_mapping_methods, /* tp_as_mapping */
2022 2022 0, /* tp_hash */
2023 2023 0, /* tp_call */
2024 2024 0, /* tp_str */
2025 2025 0, /* tp_getattro */
2026 2026 0, /* tp_setattro */
2027 2027 0, /* tp_as_buffer */
2028 2028 Py_TPFLAGS_DEFAULT, /* tp_flags */
2029 2029 "revlog index", /* tp_doc */
2030 2030 0, /* tp_traverse */
2031 2031 0, /* tp_clear */
2032 2032 0, /* tp_richcompare */
2033 2033 0, /* tp_weaklistoffset */
2034 2034 0, /* tp_iter */
2035 2035 0, /* tp_iternext */
2036 2036 index_methods, /* tp_methods */
2037 2037 0, /* tp_members */
2038 2038 index_getset, /* tp_getset */
2039 2039 0, /* tp_base */
2040 2040 0, /* tp_dict */
2041 2041 0, /* tp_descr_get */
2042 2042 0, /* tp_descr_set */
2043 2043 0, /* tp_dictoffset */
2044 2044 (initproc)index_init, /* tp_init */
2045 2045 0, /* tp_alloc */
2046 2046 };
2047 2047
2048 2048 /*
2049 2049 * returns a tuple of the form (index, cache) with elements as
2050 2050 * follows:
2051 2051 *
2052 2052 * index: an index object that lazily parses RevlogNG records
2053 2053 * cache: if data is inlined, a tuple (0, index_file_content), else None
2054 2054 * index_file_content could be a string, or a buffer
2055 2055 *
2056 2056 * added complications are for backwards compatibility
2057 2057 */
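/*
 * Illustrative usage from Python (assumed, not part of the upstream
 * source):
 *
 *     index, cache = parsers.parse_index2(data, inlined)
 *     # cache is (0, data) when the revlog is inlined, otherwise None
 */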
2058 2058 PyObject *parse_index2(PyObject *self, PyObject *args)
2059 2059 {
2060 2060 PyObject *tuple = NULL, *cache = NULL;
2061 2061 indexObject *idx;
2062 2062 int ret;
2063 2063
2064 2064 idx = PyObject_New(indexObject, &indexType);
2065 2065 if (idx == NULL)
2066 2066 goto bail;
2067 2067
2068 2068 ret = index_init(idx, args);
2069 2069 if (ret == -1)
2070 2070 goto bail;
2071 2071
2072 2072 if (idx->inlined) {
2073 2073 cache = Py_BuildValue("iO", 0, idx->data);
2074 2074 if (cache == NULL)
2075 2075 goto bail;
2076 2076 } else {
2077 2077 cache = Py_None;
2078 2078 Py_INCREF(cache);
2079 2079 }
2080 2080
2081 2081 tuple = Py_BuildValue("NN", idx, cache);
2082 2082 if (!tuple)
2083 2083 goto bail;
2084 2084 return tuple;
2085 2085
2086 2086 bail:
2087 2087 Py_XDECREF(idx);
2088 2088 Py_XDECREF(cache);
2089 2089 Py_XDECREF(tuple);
2090 2090 return NULL;
2091 2091 }
2092 2092
2093 2093 void revlog_module_init(PyObject *mod)
2094 2094 {
2095 2095 indexType.tp_new = PyType_GenericNew;
2096 2096 if (PyType_Ready(&indexType) < 0)
2097 2097 return;
2098 2098 Py_INCREF(&indexType);
2099 2099 PyModule_AddObject(mod, "index", (PyObject *)&indexType);
2100 2100
2101 2101 nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0,
2102 2102 -1, -1, -1, -1, nullid, 20);
2103 2103 if (nullentry)
2104 2104 PyObject_GC_UnTrack(nullentry);
2105 2105 }