##// END OF EJS Templates
index: return False for "len(index) in index"...
Martin von Zweigbergk -
r38902:aa33988a default
parent child Browse files
Show More
@@ -1,2167 +1,2167 b''
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #include <Python.h>
11 11 #include <assert.h>
12 12 #include <ctype.h>
13 13 #include <stddef.h>
14 14 #include <string.h>
15 15
16 16 #include "bitmanipulation.h"
17 17 #include "charencode.h"
18 18 #include "util.h"
19 19
20 20 #ifdef IS_PY3K
21 21 /* The mapping of Python types is meant to be temporary to get Python
22 22 * 3 to compile. We should remove this once Python 3 support is fully
23 23 * supported and proper types are used in the extensions themselves. */
24 24 #define PyInt_Check PyLong_Check
25 25 #define PyInt_FromLong PyLong_FromLong
26 26 #define PyInt_FromSsize_t PyLong_FromSsize_t
27 27 #define PyInt_AS_LONG PyLong_AS_LONG
28 28 #define PyInt_AsLong PyLong_AsLong
29 29 #endif
30 30
31 31 /*
32 32 * A base-16 trie for fast node->rev mapping.
33 33 *
34 34 * Positive value is index of the next node in the trie
35 35 * Negative value is a leaf: -(rev + 2)
36 36 * Zero is empty
37 37 */
typedef struct {
	/* One trie level: 16 slots, one per hex nybble of a node hash.
	 * Slot encoding (see the comment above and nt_insert/nt_find):
	 *   > 0: index of the child nodetree in self->nt
	 *   < 0: leaf, encoded as -(rev + 2)
	 *     0: empty slot */
	int children[16];
} nodetree;
41 41
42 42 /*
43 43 * This class has two behaviors.
44 44 *
45 45 * When used in a list-like way (with integer keys), we decode an
46 46 * entry in a RevlogNG index file on demand. Our last entry is a
47 47 * sentinel, always a nullid. We have limited support for
48 48 * integer-keyed insert and delete, only at elements right before the
49 49 * sentinel.
50 50 *
51 51 * With string keys, we lazily perform a reverse mapping from node to
52 52 * rev, using a base-16 trie.
53 53 */
typedef struct {
	PyObject_HEAD
	/* Type-specific fields go here. */
	PyObject *data;        /* raw bytes of index */
	Py_buffer buf;         /* buffer of data */
	PyObject **cache;      /* cached tuples, lazily allocated (index_get) */
	const char **offsets;  /* per-rev record offsets; populated on demand
	                        * only for inlined indexes (index_deref) */
	Py_ssize_t raw_length; /* original number of elements */
	Py_ssize_t length;     /* current number of elements */
	PyObject *added;       /* list of entries appended in memory;
	                        * populated on demand (index_append) */
	PyObject *headrevs;    /* cache, invalidated on changes */
	PyObject *filteredrevs;/* filtered revs set headrevs was computed for */
	nodetree *nt;          /* base-16 trie, built lazily (nt_init) */
	unsigned ntlength;     /* # nodes in use */
	unsigned ntcapacity;   /* # nodes allocated */
	int ntdepth;           /* maximum depth of tree */
	int ntsplits;          /* # splits performed */
	int ntrev;             /* last rev scanned */
	int ntlookups;         /* # lookups */
	int ntmisses;          /* # lookups that miss the cache */
	int inlined;           /* nonzero when revlog data is interleaved with
	                        * the index records (see index_deref) */
} indexObject;
76 76
77 77 static Py_ssize_t index_length(const indexObject *self)
78 78 {
79 79 if (self->added == NULL)
80 80 return self->length - 1;
81 81 return self->length + PyList_GET_SIZE(self->added) - 1;
82 82 }
83 83
/* Shared sentinel tuple returned for the null revision (rev -1). */
static PyObject *nullentry;
/* Binary hash of the null node: 20 zero bytes. */
static const char nullid[20];

static Py_ssize_t inline_scan(indexObject *self, const char **offsets);

/* Py_BuildValue format for an index entry: one unsigned 64-bit-capable
 * slot for offset+flags ('K' where long is 32-bit, 'k' where it is
 * 64-bit), seven ints, and a length-delimited 20-byte node
 * ('s#' on Python 2, 'y#' bytes on Python 3 via PY23). */
#if LONG_MAX == 0x7fffffffL
static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
#else
static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
#endif

/* A RevlogNG v1 index entry is 64 bytes long. */
static const long v1_hdrsize = 64;
97 97
98 98 /*
99 99 * Return a pointer to the beginning of a RevlogNG record.
100 100 */
101 101 static const char *index_deref(indexObject *self, Py_ssize_t pos)
102 102 {
103 103 if (self->inlined && pos > 0) {
104 104 if (self->offsets == NULL) {
105 105 self->offsets = PyMem_Malloc(self->raw_length *
106 106 sizeof(*self->offsets));
107 107 if (self->offsets == NULL)
108 108 return (const char *)PyErr_NoMemory();
109 109 inline_scan(self, self->offsets);
110 110 }
111 111 return self->offsets[pos];
112 112 }
113 113
114 114 return (const char *)(self->buf.buf) + pos * v1_hdrsize;
115 115 }
116 116
117 117 static inline int index_get_parents(indexObject *self, Py_ssize_t rev,
118 118 int *ps, int maxrev)
119 119 {
120 120 if (rev >= self->length - 1) {
121 121 PyObject *tuple = PyList_GET_ITEM(self->added,
122 122 rev - self->length + 1);
123 123 ps[0] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 5));
124 124 ps[1] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 6));
125 125 } else {
126 126 const char *data = index_deref(self, rev);
127 127 ps[0] = getbe32(data + 24);
128 128 ps[1] = getbe32(data + 28);
129 129 }
130 130 /* If index file is corrupted, ps[] may point to invalid revisions. So
131 131 * there is a risk of buffer overflow to trust them unconditionally. */
132 132 if (ps[0] > maxrev || ps[1] > maxrev) {
133 133 PyErr_SetString(PyExc_ValueError, "parent out of range");
134 134 return -1;
135 135 }
136 136 return 0;
137 137 }
138 138
139 139
140 140 /*
141 141 * RevlogNG format (all in big endian, data may be inlined):
142 142 * 6 bytes: offset
143 143 * 2 bytes: flags
144 144 * 4 bytes: compressed length
145 145 * 4 bytes: uncompressed length
146 146 * 4 bytes: base revision
147 147 * 4 bytes: link revision
148 148 * 4 bytes: parent 1 revision
149 149 * 4 bytes: parent 2 revision
150 150 * 32 bytes: nodeid (only 20 bytes used)
151 151 */
/*
 * Decode index entry 'pos' into an 8-tuple
 * (offset_flags, comp_len, uncomp_len, base_rev, link_rev,
 *  parent_1, parent_2, node) and return a new reference to it.
 * Decoded tuples for on-disk entries are memoized in self->cache.
 * Returns NULL with an exception set on error.
 */
static PyObject *index_get(indexObject *self, Py_ssize_t pos)
{
	uint64_t offset_flags;
	int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
	const char *c_node_id;
	const char *data;
	Py_ssize_t length = index_length(self) + 1;
	PyObject *entry;

	/* rev -1 is the null revision: hand back the shared sentinel */
	if (pos == -1) {
		Py_INCREF(nullentry);
		return nullentry;
	}

	if (pos < 0 || pos >= length - 1) {
		PyErr_SetString(PyExc_IndexError, "revlog index out of range");
		return NULL;
	}

	/* entries past the on-disk portion live in self->added */
	if (pos >= self->length - 1) {
		PyObject *obj;
		obj = PyList_GET_ITEM(self->added, pos - self->length + 1);
		Py_INCREF(obj);
		return obj;
	}

	/* serve from, or lazily allocate, the decoded-tuple cache */
	if (self->cache) {
		if (self->cache[pos]) {
			Py_INCREF(self->cache[pos]);
			return self->cache[pos];
		}
	} else {
		self->cache = calloc(self->raw_length, sizeof(PyObject *));
		if (self->cache == NULL)
			return PyErr_NoMemory();
	}

	data = index_deref(self, pos);
	if (data == NULL)
		return NULL;

	offset_flags = getbe32(data + 4);
	if (pos == 0) /* mask out version number for the first entry */
		offset_flags &= 0xFFFF;
	else {
		uint32_t offset_high = getbe32(data);
		offset_flags |= ((uint64_t)offset_high) << 32;
	}

	comp_len = getbe32(data + 8);
	uncomp_len = getbe32(data + 12);
	base_rev = getbe32(data + 16);
	link_rev = getbe32(data + 20);
	parent_1 = getbe32(data + 24);
	parent_2 = getbe32(data + 28);
	c_node_id = data + 32;

	entry = Py_BuildValue(tuple_format, offset_flags, comp_len,
			      uncomp_len, base_rev, link_rev,
			      parent_1, parent_2, c_node_id, 20);

	if (entry) {
		PyObject_GC_UnTrack(entry);
		/* the extra reference is owned by self->cache[pos] below */
		Py_INCREF(entry);
	}

	self->cache[pos] = entry;

	return entry;
}
222 222
223 223 /*
224 224 * Return the 20-byte SHA of the node corresponding to the given rev.
225 225 */
226 226 static const char *index_node(indexObject *self, Py_ssize_t pos)
227 227 {
228 228 Py_ssize_t length = index_length(self) + 1;
229 229 const char *data;
230 230
231 231 if (pos == length - 1 || pos == -1)
232 232 return nullid;
233 233
234 234 if (pos >= length)
235 235 return NULL;
236 236
237 237 if (pos >= self->length - 1) {
238 238 PyObject *tuple, *str;
239 239 tuple = PyList_GET_ITEM(self->added, pos - self->length + 1);
240 240 str = PyTuple_GetItem(tuple, 7);
241 241 return str ? PyBytes_AS_STRING(str) : NULL;
242 242 }
243 243
244 244 data = index_deref(self, pos);
245 245 return data ? data + 32 : NULL;
246 246 }
247 247
248 248 /*
249 249 * Return the 20-byte SHA of the node corresponding to the given rev. The
250 250 * rev is assumed to be existing. If not, an exception is set.
251 251 */
252 252 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
253 253 {
254 254 const char *node = index_node(self, pos);
255 255 if (node == NULL) {
256 256 PyErr_Format(PyExc_IndexError, "could not access rev %d",
257 257 (int)pos);
258 258 }
259 259 return node;
260 260 }
261 261
262 262 static int nt_insert(indexObject *self, const char *node, int rev);
263 263
264 264 static int node_check(PyObject *obj, char **node)
265 265 {
266 266 Py_ssize_t nodelen;
267 267 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
268 268 return -1;
269 269 if (nodelen == 20)
270 270 return 0;
271 271 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
272 272 return -1;
273 273 }
274 274
/*
 * Append an 8-tuple entry (see index_get for the field layout) to the
 * index.  The entry is stored in the in-memory self->added list, the
 * node trie (if built) learns the new node, and the cached headrevs are
 * invalidated.  Returns None, or NULL with an exception set.
 */
static PyObject *index_append(indexObject *self, PyObject *obj)
{
	char *node;
	Py_ssize_t len;

	if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
		PyErr_SetString(PyExc_TypeError, "8-tuple required");
		return NULL;
	}

	/* tuple item 7 is the binary node; must be exactly 20 bytes */
	if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
		return NULL;

	len = index_length(self) + 1;

	if (self->added == NULL) {
		self->added = PyList_New(0);
		if (self->added == NULL)
			return NULL;
	}

	if (PyList_Append(self->added, obj) == -1)
		return NULL;

	/* len was computed before the append, so len - 1 is the new rev */
	if (self->nt)
		nt_insert(self, node, len - 1);

	/* any cached head computation is now stale */
	Py_CLEAR(self->headrevs);
	Py_RETURN_NONE;
}
305 305
306 306 static void _index_clearcaches(indexObject *self)
307 307 {
308 308 if (self->cache) {
309 309 Py_ssize_t i;
310 310
311 311 for (i = 0; i < self->raw_length; i++)
312 312 Py_CLEAR(self->cache[i]);
313 313 free(self->cache);
314 314 self->cache = NULL;
315 315 }
316 316 if (self->offsets) {
317 317 PyMem_Free(self->offsets);
318 318 self->offsets = NULL;
319 319 }
320 320 free(self->nt);
321 321 self->nt = NULL;
322 322 Py_CLEAR(self->headrevs);
323 323 }
324 324
325 325 static PyObject *index_clearcaches(indexObject *self)
326 326 {
327 327 _index_clearcaches(self);
328 328 self->ntlength = self->ntcapacity = 0;
329 329 self->ntdepth = self->ntsplits = 0;
330 330 self->ntrev = -1;
331 331 self->ntlookups = self->ntmisses = 0;
332 332 Py_RETURN_NONE;
333 333 }
334 334
/*
 * Build a dict of statistics about the index and its node trie:
 * entry counts, trie capacity/depth/splits, and lookup/miss counters.
 * Returns a new dict, or NULL with an exception set.
 */
static PyObject *index_stats(indexObject *self)
{
	PyObject *obj = PyDict_New();
	PyObject *t = NULL;

	if (obj == NULL)
		return NULL;

/* store self-><field> in obj under key <desc>; jump to bail on failure */
#define istat(__n, __d) \
	do { \
		t = PyInt_FromSsize_t(self->__n); \
		if (!t) \
			goto bail; \
		if (PyDict_SetItemString(obj, __d, t) == -1) \
			goto bail; \
		Py_DECREF(t); \
	} while (0)

	if (self->added) {
		Py_ssize_t len = PyList_GET_SIZE(self->added);
		t = PyInt_FromSsize_t(len);
		if (!t)
			goto bail;
		if (PyDict_SetItemString(obj, "index entries added", t) == -1)
			goto bail;
		Py_DECREF(t);
	}

	/* only report the on-disk count when it differs from the total */
	if (self->raw_length != self->length - 1)
		istat(raw_length, "revs on disk");
	istat(length, "revs in memory");
	istat(ntcapacity, "node trie capacity");
	istat(ntdepth, "node trie depth");
	istat(ntlength, "node trie count");
	istat(ntlookups, "node trie lookups");
	istat(ntmisses, "node trie misses");
	istat(ntrev, "node trie last rev scanned");
	istat(ntsplits, "node trie splits");

#undef istat

	return obj;

bail:
	Py_XDECREF(obj);
	Py_XDECREF(t);
	return NULL;
}
383 383
384 384 /*
385 385 * When we cache a list, we want to be sure the caller can't mutate
386 386 * the cached copy.
387 387 */
388 388 static PyObject *list_copy(PyObject *list)
389 389 {
390 390 Py_ssize_t len = PyList_GET_SIZE(list);
391 391 PyObject *newlist = PyList_New(len);
392 392 Py_ssize_t i;
393 393
394 394 if (newlist == NULL)
395 395 return NULL;
396 396
397 397 for (i = 0; i < len; i++) {
398 398 PyObject *obj = PyList_GET_ITEM(list, i);
399 399 Py_INCREF(obj);
400 400 PyList_SET_ITEM(newlist, i, obj);
401 401 }
402 402
403 403 return newlist;
404 404 }
405 405
406 406 static int check_filter(PyObject *filter, Py_ssize_t arg)
407 407 {
408 408 if (filter) {
409 409 PyObject *arglist, *result;
410 410 int isfiltered;
411 411
412 412 arglist = Py_BuildValue("(n)", arg);
413 413 if (!arglist) {
414 414 return -1;
415 415 }
416 416
417 417 result = PyEval_CallObject(filter, arglist);
418 418 Py_DECREF(arglist);
419 419 if (!result) {
420 420 return -1;
421 421 }
422 422
423 423 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
424 424 * same as this function, so we can just return it directly.*/
425 425 isfiltered = PyObject_IsTrue(result);
426 426 Py_DECREF(result);
427 427 return isfiltered;
428 428 } else {
429 429 return 0;
430 430 }
431 431 }
432 432
433 433 static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
434 434 Py_ssize_t marker, char *phases)
435 435 {
436 436 PyObject *iter = NULL;
437 437 PyObject *iter_item = NULL;
438 438 Py_ssize_t min_idx = index_length(self) + 2;
439 439 long iter_item_long;
440 440
441 441 if (PyList_GET_SIZE(list) != 0) {
442 442 iter = PyObject_GetIter(list);
443 443 if (iter == NULL)
444 444 return -2;
445 445 while ((iter_item = PyIter_Next(iter))) {
446 446 iter_item_long = PyInt_AS_LONG(iter_item);
447 447 Py_DECREF(iter_item);
448 448 if (iter_item_long < min_idx)
449 449 min_idx = iter_item_long;
450 450 phases[iter_item_long] = marker;
451 451 }
452 452 Py_DECREF(iter);
453 453 }
454 454
455 455 return min_idx;
456 456 }
457 457
458 458 static inline void set_phase_from_parents(char *phases, int parent_1,
459 459 int parent_2, Py_ssize_t i)
460 460 {
461 461 if (parent_1 >= 0 && phases[parent_1] > phases[i])
462 462 phases[i] = phases[parent_1];
463 463 if (parent_2 >= 0 && phases[parent_2] > phases[i])
464 464 phases[i] = phases[parent_2];
465 465 }
466 466
/*
 * reachableroots2(minroot, heads, roots, includepath) -> list of revs
 *
 * Walk ancestors of 'heads' (never descending below 'minroot') and
 * collect the members of 'roots' that are reached.  When includepath is
 * True, additionally return every rev that lies on a path between a
 * reachable root and a head.  All state arrays are indexed by rev + 1 so
 * that nullrev (-1) fits at slot 0.
 */
static PyObject *reachableroots2(indexObject *self, PyObject *args)
{

	/* Input */
	long minroot;
	PyObject *includepatharg = NULL;
	int includepath = 0;
	/* heads and roots are lists */
	PyObject *heads = NULL;
	PyObject *roots = NULL;
	PyObject *reachable = NULL;

	PyObject *val;
	Py_ssize_t len = index_length(self);
	long revnum;
	Py_ssize_t k;
	Py_ssize_t i;
	Py_ssize_t l;
	int r;
	int parents[2];

	/* Internal data structure:
	 * tovisit: array of length len+1 (all revs + nullrev), filled upto lentovisit
	 * revstates: array of length len+1 (all revs + nullrev) */
	int *tovisit = NULL;
	long lentovisit = 0;
	enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
	char *revstates = NULL;

	/* Get arguments */
	if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
			      &PyList_Type, &roots,
			      &PyBool_Type, &includepatharg))
		goto bail;

	if (includepatharg == Py_True)
		includepath = 1;

	/* Initialize return set */
	reachable = PyList_New(0);
	if (reachable == NULL)
		goto bail;

	/* Initialize internal datastructures */
	tovisit = (int *)malloc((len + 1) * sizeof(int));
	if (tovisit == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	revstates = (char *)calloc(len + 1, 1);
	if (revstates == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* Flag every root rev with RS_ROOT */
	l = PyList_GET_SIZE(roots);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		/* If root is out of range, e.g. wdir(), it must be unreachable
		 * from heads. So we can just ignore it. */
		if (revnum + 1 < 0 || revnum + 1 >= len + 1)
			continue;
		revstates[revnum + 1] |= RS_ROOT;
	}

	/* Populate tovisit with all the heads */
	l = PyList_GET_SIZE(heads);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
			PyErr_SetString(PyExc_IndexError, "head out of range");
			goto bail;
		}
		if (!(revstates[revnum + 1] & RS_SEEN)) {
			tovisit[lentovisit++] = (int)revnum;
			revstates[revnum + 1] |= RS_SEEN;
		}
	}

	/* Visit the tovisit list and find the reachable roots */
	k = 0;
	while (k < lentovisit) {
		/* Add the node to reachable if it is a root*/
		revnum = tovisit[k++];
		if (revstates[revnum + 1] & RS_ROOT) {
			revstates[revnum + 1] |= RS_REACHABLE;
			val = PyInt_FromLong(revnum);
			if (val == NULL)
				goto bail;
			r = PyList_Append(reachable, val);
			Py_DECREF(val);
			if (r < 0)
				goto bail;
			/* without includepath, don't walk past a root */
			if (includepath == 0)
				continue;
		}

		/* Add its parents to the list of nodes to visit */
		if (revnum == -1)
			continue;
		r = index_get_parents(self, revnum, parents, (int)len - 1);
		if (r < 0)
			goto bail;
		for (i = 0; i < 2; i++) {
			if (!(revstates[parents[i] + 1] & RS_SEEN)
			    && parents[i] >= minroot) {
				tovisit[lentovisit++] = parents[i];
				revstates[parents[i] + 1] |= RS_SEEN;
			}
		}
	}

	/* Find all the nodes in between the roots we found and the heads
	 * and add them to the reachable set */
	if (includepath == 1) {
		long minidx = minroot;
		if (minidx < 0)
			minidx = 0;
		/* forward sweep: a seen rev with a reachable parent is
		 * itself reachable (on a root->head path) */
		for (i = minidx; i < len; i++) {
			if (!(revstates[i + 1] & RS_SEEN))
				continue;
			r = index_get_parents(self, i, parents, (int)len - 1);
			/* Corrupted index file, error is set from
			 * index_get_parents */
			if (r < 0)
				goto bail;
			if (((revstates[parents[0] + 1] |
			      revstates[parents[1] + 1]) & RS_REACHABLE)
			    && !(revstates[i + 1] & RS_REACHABLE)) {
				revstates[i + 1] |= RS_REACHABLE;
				val = PyInt_FromLong(i);
				if (val == NULL)
					goto bail;
				r = PyList_Append(reachable, val);
				Py_DECREF(val);
				if (r < 0)
					goto bail;
			}
		}
	}

	free(revstates);
	free(tovisit);
	return reachable;
bail:
	Py_XDECREF(reachable);
	free(revstates);
	free(tovisit);
	return NULL;
}
622 622
/*
 * compute_phases_map_sets(roots) -> (len, [None, set, set, ...])
 *
 * 'roots' is a list of lists of phase-root revs, one inner list per
 * non-public phase.  The phase of every rev is computed by marking the
 * roots and propagating phases to descendants via their parents.  The
 * result pairs the index length with a per-phase list of rev sets;
 * entry 0 (public) stays None since it is computed as a difference.
 */
static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
{
	PyObject *roots = Py_None;
	PyObject *ret = NULL;
	PyObject *phasessize = NULL;
	PyObject *phaseroots = NULL;
	PyObject *phaseset = NULL;
	PyObject *phasessetlist = NULL;
	PyObject *rev = NULL;
	Py_ssize_t len = index_length(self);
	Py_ssize_t numphase = 0;
	Py_ssize_t minrevallphases = 0;
	Py_ssize_t minrevphase = 0;
	Py_ssize_t i = 0;
	char *phases = NULL;
	long phase;

	if (!PyArg_ParseTuple(args, "O", &roots))
		goto done;
	if (roots == NULL || !PyList_Check(roots)) {
		PyErr_SetString(PyExc_TypeError, "roots must be a list");
		goto done;
	}

	phases = calloc(len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
	if (phases == NULL) {
		PyErr_NoMemory();
		goto done;
	}
	/* Put the phase information of all the roots in phases */
	numphase = PyList_GET_SIZE(roots)+1;
	minrevallphases = len + 1;
	phasessetlist = PyList_New(numphase);
	if (phasessetlist == NULL)
		goto done;

	/* slot 0 (public phase) is never materialized as a set */
	PyList_SET_ITEM(phasessetlist, 0, Py_None);
	Py_INCREF(Py_None);

	for (i = 0; i < numphase-1; i++) {
		phaseroots = PyList_GET_ITEM(roots, i);
		phaseset = PySet_New(NULL);
		if (phaseset == NULL)
			goto release;
		/* phasessetlist owns phaseset from here on */
		PyList_SET_ITEM(phasessetlist, i+1, phaseset);
		if (!PyList_Check(phaseroots)) {
			PyErr_SetString(PyExc_TypeError,
					"roots item must be a list");
			goto release;
		}
		minrevphase = add_roots_get_min(self, phaseroots, i+1, phases);
		if (minrevphase == -2) /* Error from add_roots_get_min */
			goto release;
		minrevallphases = MIN(minrevallphases, minrevphase);
	}
	/* Propagate the phase information from the roots to the revs */
	if (minrevallphases != -1) {
		int parents[2];
		for (i = minrevallphases; i < len; i++) {
			if (index_get_parents(self, i, parents,
					      (int)len - 1) < 0)
				goto release;
			set_phase_from_parents(phases, parents[0], parents[1], i);
		}
	}
	/* Transform phase list to a python list */
	phasessize = PyInt_FromLong(len);
	if (phasessize == NULL)
		goto release;
	for (i = 0; i < len; i++) {
		phase = phases[i];
		/* We only store the sets of phase for non public phase, the public phase
		 * is computed as a difference */
		if (phase != 0) {
			phaseset = PyList_GET_ITEM(phasessetlist, phase);
			rev = PyInt_FromLong(i);
			if (rev == NULL)
				goto release;
			PySet_Add(phaseset, rev);
			Py_XDECREF(rev);
		}
	}
	ret = PyTuple_Pack(2, phasessize, phasessetlist);

release:
	/* PyTuple_Pack took its own references on success */
	Py_XDECREF(phasessize);
	Py_XDECREF(phasessetlist);
done:
	free(phases);
	return ret;
}
714 714
/*
 * index_headrevs([filteredrevs]) -> list of head revs
 *
 * A head is a rev that is not the parent of any unfiltered rev.  The
 * result is cached on self->headrevs (valid while filteredrevs is the
 * same object) and the caller always receives a defensive copy.
 */
static PyObject *index_headrevs(indexObject *self, PyObject *args)
{
	Py_ssize_t i, j, len;
	char *nothead = NULL;
	PyObject *heads = NULL;
	PyObject *filter = NULL;
	PyObject *filteredrevs = Py_None;

	if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
		return NULL;
	}

	/* cache hit: same filter object as last time */
	if (self->headrevs && filteredrevs == self->filteredrevs)
		return list_copy(self->headrevs);

	Py_DECREF(self->filteredrevs);
	self->filteredrevs = filteredrevs;
	Py_INCREF(filteredrevs);

	if (filteredrevs != Py_None) {
		filter = PyObject_GetAttrString(filteredrevs, "__contains__");
		if (!filter) {
			PyErr_SetString(PyExc_TypeError,
				"filteredrevs has no attribute __contains__");
			goto bail;
		}
	}

	len = index_length(self);
	heads = PyList_New(0);
	if (heads == NULL)
		goto bail;
	if (len == 0) {
		/* empty repo: the only head is the null rev */
		PyObject *nullid = PyInt_FromLong(-1);
		if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
			Py_XDECREF(nullid);
			goto bail;
		}
		goto done;
	}

	nothead = calloc(len, 1);
	if (nothead == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* walk newest-to-oldest, marking parents of unfiltered revs */
	for (i = len - 1; i >= 0; i--) {
		int isfiltered;
		int parents[2];

		/* If nothead[i] == 1, it means we've seen an unfiltered child of this
		 * node already, and therefore this node is not filtered. So we can skip
		 * the expensive check_filter step.
		 */
		if (nothead[i] != 1) {
			isfiltered = check_filter(filter, i);
			if (isfiltered == -1) {
				PyErr_SetString(PyExc_TypeError,
						"unable to check filter");
				goto bail;
			}

			if (isfiltered) {
				nothead[i] = 1;
				continue;
			}
		}

		if (index_get_parents(self, i, parents, (int)len - 1) < 0)
			goto bail;
		for (j = 0; j < 2; j++) {
			if (parents[j] >= 0)
				nothead[parents[j]] = 1;
		}
	}

	/* collect everything not marked as a non-head */
	for (i = 0; i < len; i++) {
		PyObject *head;

		if (nothead[i])
			continue;
		head = PyInt_FromSsize_t(i);
		if (head == NULL || PyList_Append(heads, head) == -1) {
			Py_XDECREF(head);
			goto bail;
		}
	}

done:
	self->headrevs = heads;
	Py_XDECREF(filter);
	free(nothead);
	return list_copy(self->headrevs);
bail:
	Py_XDECREF(filter);
	Py_XDECREF(heads);
	free(nothead);
	return NULL;
}
815 815
816 816 /**
817 817 * Obtain the base revision index entry.
818 818 *
819 819 * Callers must ensure that rev >= 0 or illegal memory access may occur.
820 820 */
821 821 static inline int index_baserev(indexObject *self, int rev)
822 822 {
823 823 const char *data;
824 824
825 825 if (rev >= self->length - 1) {
826 826 PyObject *tuple = PyList_GET_ITEM(self->added,
827 827 rev - self->length + 1);
828 828 return (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 3));
829 829 }
830 830 else {
831 831 data = index_deref(self, rev);
832 832 if (data == NULL) {
833 833 return -2;
834 834 }
835 835
836 836 return getbe32(data + 16);
837 837 }
838 838 }
839 839
840 840 static PyObject *index_deltachain(indexObject *self, PyObject *args)
841 841 {
842 842 int rev, generaldelta;
843 843 PyObject *stoparg;
844 844 int stoprev, iterrev, baserev = -1;
845 845 int stopped;
846 846 PyObject *chain = NULL, *result = NULL;
847 847 const Py_ssize_t length = index_length(self) + 1;
848 848
849 849 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
850 850 return NULL;
851 851 }
852 852
853 853 if (PyInt_Check(stoparg)) {
854 854 stoprev = (int)PyInt_AsLong(stoparg);
855 855 if (stoprev == -1 && PyErr_Occurred()) {
856 856 return NULL;
857 857 }
858 858 }
859 859 else if (stoparg == Py_None) {
860 860 stoprev = -2;
861 861 }
862 862 else {
863 863 PyErr_SetString(PyExc_ValueError,
864 864 "stoprev must be integer or None");
865 865 return NULL;
866 866 }
867 867
868 868 if (rev < 0 || rev >= length - 1) {
869 869 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
870 870 return NULL;
871 871 }
872 872
873 873 chain = PyList_New(0);
874 874 if (chain == NULL) {
875 875 return NULL;
876 876 }
877 877
878 878 baserev = index_baserev(self, rev);
879 879
880 880 /* This should never happen. */
881 881 if (baserev <= -2) {
882 882 /* Error should be set by index_deref() */
883 883 assert(PyErr_Occurred());
884 884 goto bail;
885 885 }
886 886
887 887 iterrev = rev;
888 888
889 889 while (iterrev != baserev && iterrev != stoprev) {
890 890 PyObject *value = PyInt_FromLong(iterrev);
891 891 if (value == NULL) {
892 892 goto bail;
893 893 }
894 894 if (PyList_Append(chain, value)) {
895 895 Py_DECREF(value);
896 896 goto bail;
897 897 }
898 898 Py_DECREF(value);
899 899
900 900 if (generaldelta) {
901 901 iterrev = baserev;
902 902 }
903 903 else {
904 904 iterrev--;
905 905 }
906 906
907 907 if (iterrev < 0) {
908 908 break;
909 909 }
910 910
911 911 if (iterrev >= length - 1) {
912 912 PyErr_SetString(PyExc_IndexError, "revision outside index");
913 913 return NULL;
914 914 }
915 915
916 916 baserev = index_baserev(self, iterrev);
917 917
918 918 /* This should never happen. */
919 919 if (baserev <= -2) {
920 920 /* Error should be set by index_deref() */
921 921 assert(PyErr_Occurred());
922 922 goto bail;
923 923 }
924 924 }
925 925
926 926 if (iterrev == stoprev) {
927 927 stopped = 1;
928 928 }
929 929 else {
930 930 PyObject *value = PyInt_FromLong(iterrev);
931 931 if (value == NULL) {
932 932 goto bail;
933 933 }
934 934 if (PyList_Append(chain, value)) {
935 935 Py_DECREF(value);
936 936 goto bail;
937 937 }
938 938 Py_DECREF(value);
939 939
940 940 stopped = 0;
941 941 }
942 942
943 943 if (PyList_Reverse(chain)) {
944 944 goto bail;
945 945 }
946 946
947 947 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
948 948 Py_DECREF(chain);
949 949 return result;
950 950
951 951 bail:
952 952 Py_DECREF(chain);
953 953 return NULL;
954 954 }
955 955
956 956 static inline int nt_level(const char *node, Py_ssize_t level)
957 957 {
958 958 int v = node[level>>1];
959 959 if (!(level & 1))
960 960 v >>= 4;
961 961 return v & 0xf;
962 962 }
963 963
964 964 /*
965 965 * Return values:
966 966 *
967 967 * -4: match is ambiguous (multiple candidates)
968 968 * -2: not found
969 969 * rest: valid rev
970 970 */
/*
 * Look up a node (binary) or node prefix (hex when 'hex' is nonzero) in
 * the trie.  See the return-value table in the comment above; -1 is the
 * null node, handled by convention rather than stored in the trie.
 */
static int nt_find(indexObject *self, const char *node, Py_ssize_t nodelen,
		   int hex)
{
	int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
	int level, maxlevel, off;

	if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
		return -1;

	/* maxlevel = number of nybbles to compare, capped at 40 */
	if (hex)
		maxlevel = nodelen > 40 ? 40 : (int)nodelen;
	else
		maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);

	for (level = off = 0; level < maxlevel; level++) {
		int k = getnybble(node, level);
		nodetree *n = &self->nt[off];
		int v = n->children[k];

		if (v < 0) {
			/* leaf: decode the rev and verify the remaining
			 * nybbles of the query against the stored node */
			const char *n;
			Py_ssize_t i;

			v = -(v + 2);
			n = index_node(self, v);
			if (n == NULL)
				return -2;
			for (i = level; i < maxlevel; i++)
				if (getnybble(node, i) != nt_level(n, i))
					return -2;
			return v;
		}
		if (v == 0)
			return -2;
		/* interior node: descend */
		off = v;
	}
	/* multiple matches against an ambiguous prefix */
	return -4;
}
1010 1010
1011 1011 static int nt_new(indexObject *self)
1012 1012 {
1013 1013 if (self->ntlength == self->ntcapacity) {
1014 1014 if (self->ntcapacity >= INT_MAX / (sizeof(nodetree) * 2)) {
1015 1015 PyErr_SetString(PyExc_MemoryError,
1016 1016 "overflow in nt_new");
1017 1017 return -1;
1018 1018 }
1019 1019 self->ntcapacity *= 2;
1020 1020 self->nt = realloc(self->nt,
1021 1021 self->ntcapacity * sizeof(nodetree));
1022 1022 if (self->nt == NULL) {
1023 1023 PyErr_SetString(PyExc_MemoryError, "out of memory");
1024 1024 return -1;
1025 1025 }
1026 1026 memset(&self->nt[self->ntlength], 0,
1027 1027 sizeof(nodetree) * (self->ntcapacity - self->ntlength));
1028 1028 }
1029 1029 return self->ntlength++;
1030 1030 }
1031 1031
/*
 * Insert a (node, rev) pair into the trie, splitting leaf slots into
 * interior nodes on collision.  Returns 0 on success, -1 on error
 * (exception set by index_node_existing or nt_new).
 */
static int nt_insert(indexObject *self, const char *node, int rev)
{
	int level = 0;
	int off = 0;

	while (level < 40) {
		int k = nt_level(node, level);
		nodetree *n;
		int v;

		n = &self->nt[off];
		v = n->children[k];

		if (v == 0) {
			/* empty slot: store rev as a leaf (-(rev + 2)) */
			n->children[k] = -rev - 2;
			return 0;
		}
		if (v < 0) {
			/* slot already holds a leaf for some node */
			const char *oldnode = index_node_existing(self, -(v + 2));
			int noff;

			if (oldnode == NULL)
				return -1;
			if (!memcmp(oldnode, node, 20)) {
				/* same node: just overwrite the rev */
				n->children[k] = -rev - 2;
				return 0;
			}
			/* different node: split by pushing the old leaf
			 * one level down, then keep descending */
			noff = nt_new(self);
			if (noff == -1)
				return -1;
			/* self->nt may have been changed by realloc */
			self->nt[off].children[k] = noff;
			off = noff;
			n = &self->nt[off];
			n->children[nt_level(oldnode, ++level)] = v;
			if (level > self->ntdepth)
				self->ntdepth = level;
			self->ntsplits += 1;
		} else {
			/* interior node: descend */
			level += 1;
			off = v;
		}
	}

	return -1;
}
1078 1078
/*
 * Remove a node from the trie by overwriting its leaf slot.
 * Returns 0 on success, -1 on error (exception set).
 */
static int nt_delete_node(indexObject *self, const char *node)
{
	/* rev==-2 happens to get encoded as 0, which is interpreted as not set */
	return nt_insert(self, node, -2);
}
1084 1084
/*
 * Lazily create the trie and seed it with nullid. A no-op when the
 * trie already exists. Returns 0 on success, -1 on error (exception
 * set).
 */
static int nt_init(indexObject *self)
{
	if (self->nt == NULL) {
		if ((size_t)self->raw_length > INT_MAX / sizeof(nodetree)) {
			PyErr_SetString(PyExc_ValueError, "overflow in nt_init");
			return -1;
		}
		/* initial capacity heuristic: half the on-disk rev count,
		 * but at least 4 nodes */
		self->ntcapacity = self->raw_length < 4
			? 4 : (int)self->raw_length / 2;

		self->nt = calloc(self->ntcapacity, sizeof(nodetree));
		if (self->nt == NULL) {
			PyErr_NoMemory();
			return -1;
		}
		self->ntlength = 1;
		/* revs >= ntrev have not been inserted into the trie yet */
		self->ntrev = (int)index_length(self);
		self->ntlookups = 1;
		self->ntmisses = 0;
		if (nt_insert(self, nullid, -1) == -1) {
			free(self->nt);
			self->nt = NULL;
			return -1;
		}
	}
	return 0;
}
1112 1112
/*
 * Map a 20-byte (or shorter prefix) node to a rev, filling the trie
 * cache as a side effect.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: valid rev
 */
static int index_find_node(indexObject *self,
			   const char *node, Py_ssize_t nodelen)
{
	int rev;

	if (nt_init(self) == -1)
		return -3;

	self->ntlookups++;
	rev = nt_find(self, node, nodelen, 0);
	if (rev >= -1)
		return rev;

	/*
	 * For the first handful of lookups, we scan the entire index,
	 * and cache only the matching nodes. This optimizes for cases
	 * like "hg tip", where only a few nodes are accessed.
	 *
	 * After that, we cache every node we visit, using a single
	 * scan amortized over multiple lookups. This gives the best
	 * bulk performance, e.g. for "hg log".
	 */
	if (self->ntmisses++ < 4) {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				/* cache only the node we were asked for */
				if (nt_insert(self, n, rev) == -1)
					return -3;
				break;
			}
		}
	} else {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (nt_insert(self, n, rev) == -1) {
				/* remember how far the cache got before failing */
				self->ntrev = rev + 1;
				return -3;
			}
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				break;
			}
		}
		/* everything from ntrev down to rev is now cached */
		self->ntrev = rev;
	}

	if (rev >= 0)
		return rev;
	return -2;
}
1173 1173
1174 1174 static void raise_revlog_error(void)
1175 1175 {
1176 1176 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
1177 1177
1178 1178 mod = PyImport_ImportModule("mercurial.error");
1179 1179 if (mod == NULL) {
1180 1180 goto cleanup;
1181 1181 }
1182 1182
1183 1183 dict = PyModule_GetDict(mod);
1184 1184 if (dict == NULL) {
1185 1185 goto cleanup;
1186 1186 }
1187 1187 Py_INCREF(dict);
1188 1188
1189 1189 errclass = PyDict_GetItemString(dict, "RevlogError");
1190 1190 if (errclass == NULL) {
1191 1191 PyErr_SetString(PyExc_SystemError,
1192 1192 "could not find RevlogError");
1193 1193 goto cleanup;
1194 1194 }
1195 1195
1196 1196 /* value of exception is ignored by callers */
1197 1197 PyErr_SetString(errclass, "RevlogError");
1198 1198
1199 1199 cleanup:
1200 1200 Py_XDECREF(dict);
1201 1201 Py_XDECREF(mod);
1202 1202 }
1203 1203
/*
 * index[key]: an integer key is delegated to index_get for that rev;
 * a 20-byte node key is resolved to its rev. A missing node raises
 * RevlogError. Returns NULL on error (exception set).
 */
static PyObject *index_getitem(indexObject *self, PyObject *value)
{
	char *node;
	int rev;

	if (PyInt_Check(value))
		return index_get(self, PyInt_AS_LONG(value));

	if (node_check(value, &node) == -1)
		return NULL;
	rev = index_find_node(self, node, 20);
	/* -1 (nullrev) is a legitimate result */
	if (rev >= -1)
		return PyInt_FromLong(rev);
	if (rev == -2)
		raise_revlog_error();
	return NULL;
}
1221 1221
1222 1222 /*
1223 1223 * Fully populate the radix tree.
1224 1224 */
1225 1225 static int nt_populate(indexObject *self) {
1226 1226 int rev;
1227 1227 if (self->ntrev > 0) {
1228 1228 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1229 1229 const char *n = index_node_existing(self, rev);
1230 1230 if (n == NULL)
1231 1231 return -1;
1232 1232 if (nt_insert(self, n, rev) == -1)
1233 1233 return -1;
1234 1234 }
1235 1235 self->ntrev = -1;
1236 1236 }
1237 1237 return 0;
1238 1238 }
1239 1239
/*
 * Find the unique rev whose node matches a hex prefix.
 *
 * Return values (from nt_find with hex=1):
 *
 * -4: ambiguous prefix (multiple matches)
 * -3: error (exception set)
 * -2: not found
 * rest: valid rev (-1 for nullid)
 */
static int nt_partialmatch(indexObject *self, const char *node,
			   Py_ssize_t nodelen)
{
	if (nt_init(self) == -1)
		return -3;
	/* prefix search needs every rev in the trie */
	if (nt_populate(self) == -1)
		return -3;

	return nt_find(self, node, nodelen, 1);
}
1250 1250
/*
 * Find the length of the shortest unique prefix of node.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: length of shortest prefix
 */
static int nt_shortest(indexObject *self, const char *node)
{
	int level, off;

	if (nt_init(self) == -1)
		return -3;
	/* a correct answer requires the whole index in the trie */
	if (nt_populate(self) == -1)
		return -3;

	for (level = off = 0; level < 40; level++) {
		int k, v;
		nodetree *n = &self->nt[off];
		k = nt_level(node, level);
		v = n->children[k];
		if (v < 0) {
			/* leaf: decode the rev, then verify the full node */
			const char *n;
			v = -(v + 2);
			n = index_node_existing(self, v);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, 20) != 0)
				/*
				 * Found a unique prefix, but it wasn't for the
				 * requested node (i.e the requested node does
				 * not exist).
				 */
				return -2;
			return level + 1;
		}
		if (v == 0)
			return -2;
		/* interior node: descend one nibble */
		off = v;
	}
	/*
	 * The node was still not unique after 40 hex digits, so this won't
	 * happen. Also, if we get here, then there's a programming error in
	 * this file that made us insert a node longer than 40 hex digits.
	 */
	PyErr_SetString(PyExc_Exception, "broken node tree");
	return -3;
}
1301 1301
/*
 * .partialmatch(hexprefix) -> 20-byte node or None: resolve a hex
 * prefix (1-40 digits) to the unique matching full node. Raises
 * ValueError for bad lengths and RevlogError for ambiguous prefixes;
 * returns None for non-hex input or when nothing matches.
 */
static PyObject *index_partialmatch(indexObject *self, PyObject *args)
{
	const char *fullnode;
	int nodelen;
	char *node;
	int rev, i;

	if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
		return NULL;

	if (nodelen < 1) {
		PyErr_SetString(PyExc_ValueError, "key too short");
		return NULL;
	}

	if (nodelen > 40) {
		PyErr_SetString(PyExc_ValueError, "key too long");
		return NULL;
	}

	/* hexdigit() sets an exception on invalid characters */
	for (i = 0; i < nodelen; i++)
		hexdigit(node, i);
	if (PyErr_Occurred()) {
		/* input contains non-hex characters */
		PyErr_Clear();
		Py_RETURN_NONE;
	}

	rev = nt_partialmatch(self, node, nodelen);

	switch (rev) {
	case -4:
		raise_revlog_error();
		/* fall through: return NULL with the exception set */
	case -3:
		return NULL;
	case -2:
		Py_RETURN_NONE;
	case -1:
		return PyBytes_FromStringAndSize(nullid, 20);
	}

	fullnode = index_node_existing(self, rev);
	if (fullnode == NULL) {
		return NULL;
	}
	return PyBytes_FromStringAndSize(fullnode, 20);
}
1349 1349
1350 1350 static PyObject *index_shortest(indexObject *self, PyObject *args)
1351 1351 {
1352 1352 PyObject *val;
1353 1353 char *node;
1354 1354 int length;
1355 1355
1356 1356 if (!PyArg_ParseTuple(args, "O", &val))
1357 1357 return NULL;
1358 1358 if (node_check(val, &node) == -1)
1359 1359 return NULL;
1360 1360
1361 1361 self->ntlookups++;
1362 1362 length = nt_shortest(self, node);
1363 1363 if (length == -3)
1364 1364 return NULL;
1365 1365 if (length == -2) {
1366 1366 raise_revlog_error();
1367 1367 return NULL;
1368 1368 }
1369 1369 return PyInt_FromLong(length);
1370 1370 }
1371 1371
1372 1372 static PyObject *index_m_get(indexObject *self, PyObject *args)
1373 1373 {
1374 1374 PyObject *val;
1375 1375 char *node;
1376 1376 int rev;
1377 1377
1378 1378 if (!PyArg_ParseTuple(args, "O", &val))
1379 1379 return NULL;
1380 1380 if (node_check(val, &node) == -1)
1381 1381 return NULL;
1382 1382 rev = index_find_node(self, node, 20);
1383 1383 if (rev == -3)
1384 1384 return NULL;
1385 1385 if (rev == -2)
1386 1386 Py_RETURN_NONE;
1387 1387 return PyInt_FromLong(rev);
1388 1388 }
1389 1389
1390 1390 static int index_contains(indexObject *self, PyObject *value)
1391 1391 {
1392 1392 char *node;
1393 1393
1394 1394 if (PyInt_Check(value)) {
1395 1395 long rev = PyInt_AS_LONG(value);
1396 return rev >= -1 && rev < index_length(self) + 1;
1396 return rev >= -1 && rev < index_length(self);
1397 1397 }
1398 1398
1399 1399 if (node_check(value, &node) == -1)
1400 1400 return -1;
1401 1401
1402 1402 switch (index_find_node(self, node, 20)) {
1403 1403 case -3:
1404 1404 return -1;
1405 1405 case -2:
1406 1406 return 0;
1407 1407 default:
1408 1408 return 1;
1409 1409 }
1410 1410 }
1411 1411
typedef uint64_t bitmask;

/*
 * Given a disjoint set of revs, return all candidates for the
 * greatest common ancestor. In revset notation, this is the set
 * "heads(::a and ::b and ...)"
 */
static PyObject *find_gca_candidates(indexObject *self, const int *revs,
				     int revcount)
{
	/* each input rev owns one bit; allseen = all input bits set */
	const bitmask allseen = (1ull << revcount) - 1;
	/* poison marks revs below an already-emitted candidate */
	const bitmask poison = 1ull << revcount;
	PyObject *gca = PyList_New(0);
	int i, v, interesting;
	int maxrev = -1;
	bitmask sp;
	bitmask *seen;

	if (gca == NULL)
		return PyErr_NoMemory();

	for (i = 0; i < revcount; i++) {
		if (revs[i] > maxrev)
			maxrev = revs[i];
	}

	/* seen[v]: which input revs can reach v (one bit per input) */
	seen = calloc(sizeof(*seen), maxrev + 1);
	if (seen == NULL) {
		Py_DECREF(gca);
		return PyErr_NoMemory();
	}

	for (i = 0; i < revcount; i++)
		seen[revs[i]] = 1ull << i;

	interesting = revcount;

	/* walk from the highest rev downward, propagating masks to parents */
	for (v = maxrev; v >= 0 && interesting; v--) {
		bitmask sv = seen[v];
		int parents[2];

		if (!sv)
			continue;

		if (sv < poison) {
			interesting -= 1;
			if (sv == allseen) {
				/* v is reachable from every input: candidate */
				PyObject *obj = PyInt_FromLong(v);
				if (obj == NULL)
					goto bail;
				if (PyList_Append(gca, obj) == -1) {
					Py_DECREF(obj);
					goto bail;
				}
				sv |= poison;
				/* if v was itself an input, nothing below can
				 * add more candidates */
				for (i = 0; i < revcount; i++) {
					if (revs[i] == v)
						goto done;
				}
			}
		}
		if (index_get_parents(self, v, parents, maxrev) < 0)
			goto bail;

		for (i = 0; i < 2; i++) {
			int p = parents[i];
			if (p == -1)
				continue;
			sp = seen[p];
			if (sv < poison) {
				if (sp == 0) {
					seen[p] = sv;
					interesting++;
				}
				else if (sp != sv)
					seen[p] |= sv;
			} else {
				/* poison propagates and retires the parent */
				if (sp && sp < poison)
					interesting--;
				seen[p] = sv;
			}
		}
	}

done:
	free(seen);
	return gca;
bail:
	free(seen);
	Py_XDECREF(gca);
	return NULL;
}
1504 1504
/*
 * Given a disjoint set of revs, return the subset with the longest
 * path to the root.
 */
static PyObject *find_deepest(indexObject *self, PyObject *revs)
{
	const Py_ssize_t revcount = PyList_GET_SIZE(revs);
	static const Py_ssize_t capacity = 24;
	int *depth, *interesting = NULL;
	int i, j, v, ninteresting;
	PyObject *dict = NULL, *keys = NULL;
	long *seen = NULL;
	int maxrev = -1;
	long final;

	/* each input rev needs its own bit in a long: cap the set size */
	if (revcount > capacity) {
		PyErr_Format(PyExc_OverflowError,
			     "bitset size (%ld) > capacity (%ld)",
			     (long)revcount, (long)capacity);
		return NULL;
	}

	for (i = 0; i < revcount; i++) {
		int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
		if (n > maxrev)
			maxrev = n;
	}

	/* depth[v]: path length from v to the inputs it reaches; 0 = unseen */
	depth = calloc(sizeof(*depth), maxrev + 1);
	if (depth == NULL)
		return PyErr_NoMemory();

	/* seen[v]: bitmask of input revs that can reach v */
	seen = calloc(sizeof(*seen), maxrev + 1);
	if (seen == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* interesting[s]: roughly, how many still-active revs carry mask s */
	interesting = calloc(sizeof(*interesting), 1 << revcount);
	if (interesting == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	if (PyList_Sort(revs) == -1)
		goto bail;

	for (i = 0; i < revcount; i++) {
		int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
		long b = 1l << i;
		depth[n] = 1;
		seen[n] = b;
		interesting[b] = 1;
	}

	/* invariant: ninteresting is the number of non-zero entries in
	 * interesting. */
	ninteresting = (int)revcount;

	for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
		int dv = depth[v];
		int parents[2];
		long sv;

		if (dv == 0)
			continue;

		sv = seen[v];
		if (index_get_parents(self, v, parents, maxrev) < 0)
			goto bail;

		for (i = 0; i < 2; i++) {
			int p = parents[i];
			long sp;
			int dp;

			if (p == -1)
				continue;

			dp = depth[p];
			sp = seen[p];
			if (dp <= dv) {
				/* longer path found through v: adopt its mask */
				depth[p] = dv + 1;
				if (sp != sv) {
					interesting[sv] += 1;
					seen[p] = sv;
					if (sp) {
						interesting[sp] -= 1;
						if (interesting[sp] == 0)
							ninteresting -= 1;
					}
				}
			}
			else if (dv == dp - 1) {
				/* equal-depth paths converge: union the masks */
				long nsp = sp | sv;
				if (nsp == sp)
					continue;
				seen[p] = nsp;
				interesting[sp] -= 1;
				if (interesting[sp] == 0)
					ninteresting -= 1;
				if (interesting[nsp] == 0)
					ninteresting += 1;
				interesting[nsp] += 1;
			}
		}
		interesting[sv] -= 1;
		if (interesting[sv] == 0)
			ninteresting -= 1;
	}

	/* union the remaining interesting masks; set bits pick the winners */
	final = 0;
	j = ninteresting;
	/* NOTE(review): interesting[] has only 1 << revcount entries while
	 * the bound here is 2 << revcount; the loop relies on j hitting 0
	 * before i leaves the array -- confirm that invariant holds. */
	for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
		if (interesting[i] == 0)
			continue;
		final |= i;
		j -= 1;
	}
	if (final == 0) {
		keys = PyList_New(0);
		goto bail;
	}

	dict = PyDict_New();
	if (dict == NULL)
		goto bail;

	/* keep exactly the input revs whose bit survived in final */
	for (i = 0; i < revcount; i++) {
		PyObject *key;

		if ((final & (1 << i)) == 0)
			continue;

		key = PyList_GET_ITEM(revs, i);
		Py_INCREF(key);
		Py_INCREF(Py_None);
		if (PyDict_SetItem(dict, key, Py_None) == -1) {
			Py_DECREF(key);
			Py_DECREF(Py_None);
			goto bail;
		}
	}

	keys = PyDict_Keys(dict);

bail:
	free(depth);
	free(seen);
	free(interesting);
	Py_XDECREF(dict);

	return keys;
}
1659 1659
/*
 * Given a (possibly overlapping) set of revs, return all the
 * common ancestors heads: heads(::args[0] and ::a[1] and ...)
 */
static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
{
	PyObject *ret = NULL;
	Py_ssize_t argcount, i, len;
	bitmask repeat = 0;
	int revcount = 0;
	int *revs;

	argcount = PySequence_Length(args);
	revs = PyMem_Malloc(argcount * sizeof(*revs));
	if (argcount > 0 && revs == NULL)
		return PyErr_NoMemory();
	len = index_length(self);

	for (i = 0; i < argcount; i++) {
		static const int capacity = 24;
		PyObject *obj = PySequence_GetItem(args, i);
		bitmask x;
		long val;

		if (!PyInt_Check(obj)) {
			PyErr_SetString(PyExc_TypeError,
					"arguments must all be ints");
			Py_DECREF(obj);
			goto bail;
		}
		val = PyInt_AsLong(obj);
		Py_DECREF(obj);
		if (val == -1) {
			/* -1 (nullrev) short-circuits to an empty result */
			ret = PyList_New(0);
			goto done;
		}
		if (val < 0 || val >= len) {
			PyErr_SetString(PyExc_IndexError,
					"index out of range");
			goto bail;
		}
		/* this cheesy bloom filter lets us avoid some more
		 * expensive duplicate checks in the common set-is-disjoint
		 * case */
		x = 1ull << (val & 0x3f);
		if (repeat & x) {
			int k;
			for (k = 0; k < revcount; k++) {
				if (val == revs[k])
					goto duplicate;
			}
		}
		else repeat |= x;
		/* find_gca_candidates uses one bit per rev: cap at 24 */
		if (revcount >= capacity) {
			PyErr_Format(PyExc_OverflowError,
				     "bitset size (%d) > capacity (%d)",
				     revcount, capacity);
			goto bail;
		}
		revs[revcount++] = (int)val;
		duplicate:;
	}

	if (revcount == 0) {
		ret = PyList_New(0);
		goto done;
	}
	if (revcount == 1) {
		/* a single rev is its own gca */
		PyObject *obj;
		ret = PyList_New(1);
		if (ret == NULL)
			goto bail;
		obj = PyInt_FromLong(revs[0]);
		if (obj == NULL)
			goto bail;
		PyList_SET_ITEM(ret, 0, obj);
		goto done;
	}

	ret = find_gca_candidates(self, revs, revcount);
	if (ret == NULL)
		goto bail;

done:
	PyMem_Free(revs);
	return ret;

bail:
	PyMem_Free(revs);
	Py_XDECREF(ret);
	return NULL;
}
1752 1752
1753 1753 /*
1754 1754 * Given a (possibly overlapping) set of revs, return the greatest
1755 1755 * common ancestors: those with the longest path to the root.
1756 1756 */
1757 1757 static PyObject *index_ancestors(indexObject *self, PyObject *args)
1758 1758 {
1759 1759 PyObject *ret;
1760 1760 PyObject *gca = index_commonancestorsheads(self, args);
1761 1761 if (gca == NULL)
1762 1762 return NULL;
1763 1763
1764 1764 if (PyList_GET_SIZE(gca) <= 1) {
1765 1765 return gca;
1766 1766 }
1767 1767
1768 1768 ret = find_deepest(self, gca);
1769 1769 Py_DECREF(gca);
1770 1770 return ret;
1771 1771 }
1772 1772
/*
 * Invalidate any trie entries introduced by added revs.
 * start is an offset into self->added (not a rev number).
 */
static void nt_invalidate_added(indexObject *self, Py_ssize_t start)
{
	Py_ssize_t i, len = PyList_GET_SIZE(self->added);

	for (i = start; i < len; i++) {
		PyObject *tuple = PyList_GET_ITEM(self->added, i);
		/* tuple slot 7 holds the 20-byte binary node */
		PyObject *node = PyTuple_GET_ITEM(tuple, 7);

		nt_delete_node(self, PyBytes_AS_STRING(node));
	}

	/* all added entries were invalidated: drop the whole list */
	if (start == 0)
		Py_CLEAR(self->added);
}
1790 1790
/*
 * Delete a numeric range of revs, which must be at the end of the
 * range, but exclude the sentinel nullid entry.
 * Returns 0 on success, -1 on error (exception set).
 */
static int index_slice_del(indexObject *self, PyObject *item)
{
	Py_ssize_t start, stop, step, slicelength;
	/* + 1 for the sentinel nullid entry */
	Py_ssize_t length = index_length(self) + 1;
	int ret = 0;

	/* Argument changed from PySliceObject* to PyObject* in Python 3. */
#ifdef IS_PY3K
	if (PySlice_GetIndicesEx(item, length,
#else
	if (PySlice_GetIndicesEx((PySliceObject*)item, length,
#endif
				 &start, &stop, &step, &slicelength) < 0)
		return -1;

	if (slicelength <= 0)
		return 0;

	if ((step < 0 && start < stop) || (step > 0 && start > stop))
		stop = start;

	if (step < 0) {
		/* normalize a reversed slice to forward order */
		stop = start + 1;
		start = stop + step*(slicelength - 1) - 1;
		step = -step;
	}

	if (step != 1) {
		PyErr_SetString(PyExc_ValueError,
				"revlog index delete requires step size of 1");
		return -1;
	}

	/* only truncation at the end (excluding the sentinel) is allowed */
	if (stop != length - 1) {
		PyErr_SetString(PyExc_IndexError,
				"revlog index deletion indices are invalid");
		return -1;
	}

	if (start < self->length - 1) {
		/* truncation reaches into the on-disk part of the index */
		if (self->nt) {
			Py_ssize_t i;

			for (i = start + 1; i < self->length - 1; i++) {
				const char *node = index_node_existing(self, i);
				if (node == NULL)
					return -1;

				nt_delete_node(self, node);
			}
			if (self->added)
				nt_invalidate_added(self, 0);
			if (self->ntrev > start)
				self->ntrev = (int)start;
		}
		self->length = start + 1;
		if (start < self->raw_length) {
			/* drop cached entry tuples for the removed revs */
			if (self->cache) {
				Py_ssize_t i;
				for (i = start; i < self->raw_length; i++)
					Py_CLEAR(self->cache[i]);
			}
			self->raw_length = start;
		}
		goto done;
	}

	/* truncation affects only the in-memory added entries */
	if (self->nt) {
		nt_invalidate_added(self, start - self->length + 1);
		if (self->ntrev > start)
			self->ntrev = (int)start;
	}
	if (self->added)
		ret = PyList_SetSlice(self->added, start - self->length + 1,
				      PyList_GET_SIZE(self->added), NULL);
done:
	/* cached head revs are stale after any truncation */
	Py_CLEAR(self->headrevs);
	return ret;
}
1874 1874
/*
 * Supported ops:
 *
 * slice deletion
 * string assignment (extend node->rev mapping)
 * string deletion (shrink node->rev mapping)
 *
 * Returns 0 on success, -1 on error (exception set).
 */
static int index_assign_subscript(indexObject *self, PyObject *item,
				  PyObject *value)
{
	char *node;
	long rev;

	if (PySlice_Check(item) && value == NULL)
		return index_slice_del(self, item);

	if (node_check(item, &node) == -1)
		return -1;

	if (value == NULL)
		/* node deletion is a no-op if the trie was never built */
		return self->nt ? nt_delete_node(self, node) : 0;
	rev = PyInt_AsLong(value);
	/* PyInt_AsLong returns -1 on error, caught by the rev < 0 test */
	if (rev > INT_MAX || rev < 0) {
		if (!PyErr_Occurred())
			PyErr_SetString(PyExc_ValueError, "rev out of range");
		return -1;
	}

	if (nt_init(self) == -1)
		return -1;
	return nt_insert(self, node, (int)rev);
}
1907 1907
/*
 * Find all RevlogNG entries in an index that has inline data. Update
 * the optional "offsets" table with those entries.
 *
 * Returns the number of records found, or -1 if the data is corrupt
 * (exception set).
 */
static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
{
	const char *data = (const char *)self->buf.buf;
	Py_ssize_t pos = 0;
	Py_ssize_t end = self->buf.len;
	long incr = v1_hdrsize;
	Py_ssize_t len = 0;

	/* pos >= 0 guards against overflow from a bogus comp_len */
	while (pos + v1_hdrsize <= end && pos >= 0) {
		uint32_t comp_len;
		/* 3rd element of header is length of compressed inline data */
		comp_len = getbe32(data + pos + 8);
		incr = v1_hdrsize + comp_len;
		if (offsets)
			offsets[len] = data + pos;
		len++;
		pos += incr;
	}

	/* a well-formed inline index ends exactly at the buffer boundary */
	if (pos != end) {
		if (!PyErr_Occurred())
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
		return -1;
	}

	return len;
}
1939 1939
/*
 * index.__init__(data, inlined): attach a RevlogNG index buffer.
 * data must support the buffer protocol; inlined indicates whether
 * revision data is interleaved with the index records.
 * Returns 0 on success, -1 on error (exception set).
 */
static int index_init(indexObject *self, PyObject *args)
{
	PyObject *data_obj, *inlined_obj;
	Py_ssize_t size;

	/* Initialize before argument-checking to avoid index_dealloc() crash. */
	self->raw_length = 0;
	self->added = NULL;
	self->cache = NULL;
	self->data = NULL;
	memset(&self->buf, 0, sizeof(self->buf));
	self->headrevs = NULL;
	self->filteredrevs = Py_None;
	Py_INCREF(Py_None);
	self->nt = NULL;
	self->offsets = NULL;

	if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
		return -1;
	if (!PyObject_CheckBuffer(data_obj)) {
		PyErr_SetString(PyExc_TypeError,
				"data does not support buffer interface");
		return -1;
	}

	if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
		return -1;
	size = self->buf.len;

	self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
	self->data = data_obj;

	self->ntlength = self->ntcapacity = 0;
	self->ntdepth = self->ntsplits = 0;
	self->ntlookups = self->ntmisses = 0;
	self->ntrev = -1;
	Py_INCREF(self->data);

	if (self->inlined) {
		/* inline data: counting records requires a full scan */
		Py_ssize_t len = inline_scan(self, NULL);
		if (len == -1)
			goto bail;
		self->raw_length = len;
		self->length = len + 1;
	} else {
		if (size % v1_hdrsize) {
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
			goto bail;
		}
		/* fixed-size records: length is simple arithmetic
		 * (+ 1 for the nullid sentinel) */
		self->raw_length = size / v1_hdrsize;
		self->length = self->raw_length + 1;
	}

	return 0;
bail:
	return -1;
}
1997 1997
1998 1998 static PyObject *index_nodemap(indexObject *self)
1999 1999 {
2000 2000 Py_INCREF(self);
2001 2001 return (PyObject *)self;
2002 2002 }
2003 2003
/*
 * Destructor: release caches, the underlying buffer view, and all
 * owned references, then free the object itself.
 */
static void index_dealloc(indexObject *self)
{
	_index_clearcaches(self);
	Py_XDECREF(self->filteredrevs);
	if (self->buf.buf) {
		PyBuffer_Release(&self->buf);
		memset(&self->buf, 0, sizeof(self->buf));
	}
	Py_XDECREF(self->data);
	Py_XDECREF(self->added);
	PyObject_Del(self);
}
2016 2016
/* Sequence protocol: len(index), index[rev] and "key in index". */
static PySequenceMethods index_sequence_methods = {
	(lenfunc)index_length,      /* sq_length */
	0,                          /* sq_concat */
	0,                          /* sq_repeat */
	(ssizeargfunc)index_get,    /* sq_item */
	0,                          /* sq_slice */
	0,                          /* sq_ass_item */
	0,                          /* sq_ass_slice */
	(objobjproc)index_contains, /* sq_contains */
};
2027 2027
/* Mapping protocol: rev/node lookup, node assignment, slice deletion. */
static PyMappingMethods index_mapping_methods = {
	(lenfunc)index_length,                 /* mp_length */
	(binaryfunc)index_getitem,             /* mp_subscript */
	(objobjargproc)index_assign_subscript, /* mp_ass_subscript */
};
2033 2033
/* Methods exposed on parsers.index instances. */
static PyMethodDef index_methods[] = {
	{"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
	 "return the gca set of the given revs"},
	{"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
	  METH_VARARGS,
	  "return the heads of the common ancestors of the given revs"},
	{"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
	 "clear the index caches"},
	{"get", (PyCFunction)index_m_get, METH_VARARGS,
	 "get an index entry"},
	{"computephasesmapsets", (PyCFunction)compute_phases_map_sets,
			METH_VARARGS, "compute phases"},
	{"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
		"reachableroots"},
	{"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
	 "get head revisions"}, /* Can do filtering since 3.2 */
	{"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
	 "get filtered head revisions"}, /* Can always do filtering */
	{"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
	 "determine revisions with deltas to reconstruct fulltext"},
	{"append", (PyCFunction)index_append, METH_O,
	 "append an index entry"},
	{"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
	 "match a potentially ambiguous node ID"},
	{"shortest", (PyCFunction)index_shortest, METH_VARARGS,
	 "find length of shortest hex nodeid of a binary ID"},
	{"stats", (PyCFunction)index_stats, METH_NOARGS,
	 "stats for the index"},
	{NULL} /* Sentinel */
};
2064 2064
/* Attribute getters: .nodemap returns the index object itself. */
static PyGetSetDef index_getset[] = {
	{"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
	{NULL} /* Sentinel */
};
2069 2069
/* Type object for parsers.index. */
static PyTypeObject indexType = {
	PyVarObject_HEAD_INIT(NULL, 0) /* header */
	"parsers.index",           /* tp_name */
	sizeof(indexObject),       /* tp_basicsize */
	0,                         /* tp_itemsize */
	(destructor)index_dealloc, /* tp_dealloc */
	0,                         /* tp_print */
	0,                         /* tp_getattr */
	0,                         /* tp_setattr */
	0,                         /* tp_compare */
	0,                         /* tp_repr */
	0,                         /* tp_as_number */
	&index_sequence_methods,   /* tp_as_sequence */
	&index_mapping_methods,    /* tp_as_mapping */
	0,                         /* tp_hash */
	0,                         /* tp_call */
	0,                         /* tp_str */
	0,                         /* tp_getattro */
	0,                         /* tp_setattro */
	0,                         /* tp_as_buffer */
	Py_TPFLAGS_DEFAULT,        /* tp_flags */
	"revlog index",            /* tp_doc */
	0,                         /* tp_traverse */
	0,                         /* tp_clear */
	0,                         /* tp_richcompare */
	0,                         /* tp_weaklistoffset */
	0,                         /* tp_iter */
	0,                         /* tp_iternext */
	index_methods,             /* tp_methods */
	0,                         /* tp_members */
	index_getset,              /* tp_getset */
	0,                         /* tp_base */
	0,                         /* tp_dict */
	0,                         /* tp_descr_get */
	0,                         /* tp_descr_set */
	0,                         /* tp_dictoffset */
	(initproc)index_init,      /* tp_init */
	0,                         /* tp_alloc */
};
2109 2109
/*
 * returns a tuple of the form (index, cache) with elements as
 * follows:
 *
 * index: an index object that lazily parses RevlogNG records
 * cache: if data is inlined, a tuple (0, index_file_content), else None
 *        index_file_content could be a string, or a buffer
 *
 * added complications are for backwards compatibility
 */
PyObject *parse_index2(PyObject *self, PyObject *args)
{
	PyObject *tuple = NULL, *cache = NULL;
	indexObject *idx;
	int ret;

	idx = PyObject_New(indexObject, &indexType);
	if (idx == NULL)
		goto bail;

	ret = index_init(idx, args);
	if (ret == -1)
		goto bail;

	if (idx->inlined) {
		cache = Py_BuildValue("iO", 0, idx->data);
		if (cache == NULL)
			goto bail;
	} else {
		cache = Py_None;
		Py_INCREF(cache);
	}

	/* "NN": Py_BuildValue takes ownership of both references */
	tuple = Py_BuildValue("NN", idx, cache);
	if (!tuple)
		goto bail;
	return tuple;

bail:
	Py_XDECREF(idx);
	Py_XDECREF(cache);
	Py_XDECREF(tuple);
	return NULL;
}
2154 2154
/*
 * Register the index type on the parsers module and build the shared
 * nullid sentinel entry.
 */
void revlog_module_init(PyObject *mod)
{
	indexType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&indexType) < 0)
		return;
	Py_INCREF(&indexType);
	PyModule_AddObject(mod, "index", (PyObject *)&indexType);

	/* sentinel entry: zero offsets/sizes, -1 parents/linkrev, null node */
	nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0,
				  -1, -1, -1, -1, nullid, 20);
	if (nullentry)
		/* long-lived static tuple: keep the cyclic GC from
		 * traversing it */
		PyObject_GC_UnTrack(nullentry);
}
General Comments 0
You need to be logged in to leave comments. Login now