index: avoid duplicating capacity-growth expression...
Martin von Zweigbergk
r39106:06ff7ea4 default
@@ -1,2190 +1,2189 b''
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #include <Python.h>
11 11 #include <assert.h>
12 12 #include <ctype.h>
13 13 #include <stddef.h>
14 14 #include <string.h>
15 15
16 16 #include "bitmanipulation.h"
17 17 #include "charencode.h"
18 18 #include "util.h"
19 19
20 20 #ifdef IS_PY3K
21 21 /* The mapping of Python types is meant to be temporary to get Python
22 22 * 3 to compile. We should remove this once Python 3 is fully
23 23 * supported and proper types are used in the extensions themselves. */
24 24 #define PyInt_Check PyLong_Check
25 25 #define PyInt_FromLong PyLong_FromLong
26 26 #define PyInt_FromSsize_t PyLong_FromSsize_t
27 27 #define PyInt_AS_LONG PyLong_AS_LONG
28 28 #define PyInt_AsLong PyLong_AsLong
29 29 #endif
30 30
31 31 typedef struct indexObjectStruct indexObject;
32 32
33 33 typedef struct {
34 34 int children[16];
35 35 } nodetreenode;
36 36
37 37 /*
38 38 * A base-16 trie for fast node->rev mapping.
39 39 *
40 40 * Positive value is index of the next node in the trie
41 41 * Negative value is a leaf: -(rev + 2)
42 42 * Zero is empty
43 43 */
44 44 typedef struct {
45 45 indexObject *index;
46 46 nodetreenode *nodes;
47 47 unsigned length; /* # nodes in use */
48 48 unsigned capacity; /* # nodes allocated */
49 49 int depth; /* maximum depth of tree */
50 50 int splits; /* # splits performed */
51 51 } nodetree;
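/*
 * For illustration (derived from the encoding rules above), a child slot
 * of a nodetreenode decodes as:
 *
 *     int v = n->children[k];
 *     if (v == 0)      the slot is empty;
 *     else if (v > 0)  v is the index of the next nodetreenode in nodes[];
 *     else             v is a leaf for rev -(v + 2), e.g. -7 encodes rev 5.
 */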
52 52
53 53 /*
54 54 * This class has two behaviors.
55 55 *
56 56 * When used in a list-like way (with integer keys), we decode an
57 57 * entry in a RevlogNG index file on demand. Our last entry is a
58 58 * sentinel, always a nullid. We have limited support for
59 59 * integer-keyed insert and delete, only at elements right before the
60 60 * sentinel.
61 61 *
62 62 * With string keys, we lazily perform a reverse mapping from node to
63 63 * rev, using a base-16 trie.
64 64 */
65 65 struct indexObjectStruct {
66 66 PyObject_HEAD
67 67 /* Type-specific fields go here. */
68 68 PyObject *data; /* raw bytes of index */
69 69 Py_buffer buf; /* buffer of data */
70 70 PyObject **cache; /* cached tuples */
71 71 const char **offsets; /* populated on demand */
72 72 Py_ssize_t raw_length; /* original number of elements */
73 73 Py_ssize_t length; /* current number of elements */
74 74 PyObject *added; /* populated on demand */
75 75 PyObject *headrevs; /* cache, invalidated on changes */
76 76 PyObject *filteredrevs; /* filtered revs set */
77 77 nodetree *nt; /* base-16 trie */
78 78 int ntrev; /* last rev scanned */
79 79 int ntlookups; /* # lookups */
80 80 int ntmisses; /* # lookups that miss the cache */
81 81 int inlined;
82 82 };
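/*
 * A brief sketch of the two behaviors described above, as seen from the
 * Python side (illustrative; see index_getitem() below): subscripting with
 * an integer returns the decoded 8-tuple for that rev via index_get(),
 * while subscripting with a 20-byte node returns its rev number via the
 * lazily built base-16 trie (index_find_node()).
 */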
83 83
84 84 static Py_ssize_t index_length(const indexObject *self)
85 85 {
86 86 if (self->added == NULL)
87 87 return self->length;
88 88 return self->length + PyList_GET_SIZE(self->added);
89 89 }
90 90
91 91 static PyObject *nullentry;
92 92 static const char nullid[20];
93 93
94 94 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
95 95
96 96 #if LONG_MAX == 0x7fffffffL
97 97 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
98 98 #else
99 99 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
100 100 #endif
101 101
102 102 /* A RevlogNG v1 index entry is 64 bytes long. */
103 103 static const long v1_hdrsize = 64;
104 104
105 105 /*
106 106 * Return a pointer to the beginning of a RevlogNG record.
107 107 */
108 108 static const char *index_deref(indexObject *self, Py_ssize_t pos)
109 109 {
110 110 if (self->inlined && pos > 0) {
111 111 if (self->offsets == NULL) {
112 112 self->offsets = PyMem_Malloc(self->raw_length *
113 113 sizeof(*self->offsets));
114 114 if (self->offsets == NULL)
115 115 return (const char *)PyErr_NoMemory();
116 116 inline_scan(self, self->offsets);
117 117 }
118 118 return self->offsets[pos];
119 119 }
120 120
121 121 return (const char *)(self->buf.buf) + pos * v1_hdrsize;
122 122 }
123 123
124 124 static inline int index_get_parents(indexObject *self, Py_ssize_t rev,
125 125 int *ps, int maxrev)
126 126 {
127 127 if (rev >= self->length) {
128 128 PyObject *tuple = PyList_GET_ITEM(self->added, rev - self->length);
129 129 ps[0] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 5));
130 130 ps[1] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 6));
131 131 } else {
132 132 const char *data = index_deref(self, rev);
133 133 ps[0] = getbe32(data + 24);
134 134 ps[1] = getbe32(data + 28);
135 135 }
136 136 /* If the index file is corrupted, ps[] may point to invalid revisions,
137 137 * so trusting them unconditionally risks a buffer overflow. */
138 138 if (ps[0] > maxrev || ps[1] > maxrev) {
139 139 PyErr_SetString(PyExc_ValueError, "parent out of range");
140 140 return -1;
141 141 }
142 142 return 0;
143 143 }
144 144
145 145
146 146 /*
147 147 * RevlogNG format (all in big endian, data may be inlined):
148 148 * 6 bytes: offset
149 149 * 2 bytes: flags
150 150 * 4 bytes: compressed length
151 151 * 4 bytes: uncompressed length
152 152 * 4 bytes: base revision
153 153 * 4 bytes: link revision
154 154 * 4 bytes: parent 1 revision
155 155 * 4 bytes: parent 2 revision
156 156 * 32 bytes: nodeid (only 20 bytes used)
157 157 */
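/* The field sizes above sum to 6 + 2 + 4*6 + 32 = 64 bytes, i.e. v1_hdrsize. */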
158 158 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
159 159 {
160 160 uint64_t offset_flags;
161 161 int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
162 162 const char *c_node_id;
163 163 const char *data;
164 164 Py_ssize_t length = index_length(self);
165 165 PyObject *entry;
166 166
167 167 if (pos == -1) {
168 168 Py_INCREF(nullentry);
169 169 return nullentry;
170 170 }
171 171
172 172 if (pos < 0 || pos >= length) {
173 173 PyErr_SetString(PyExc_IndexError, "revlog index out of range");
174 174 return NULL;
175 175 }
176 176
177 177 if (pos >= self->length) {
178 178 PyObject *obj;
179 179 obj = PyList_GET_ITEM(self->added, pos - self->length);
180 180 Py_INCREF(obj);
181 181 return obj;
182 182 }
183 183
184 184 if (self->cache) {
185 185 if (self->cache[pos]) {
186 186 Py_INCREF(self->cache[pos]);
187 187 return self->cache[pos];
188 188 }
189 189 } else {
190 190 self->cache = calloc(self->raw_length, sizeof(PyObject *));
191 191 if (self->cache == NULL)
192 192 return PyErr_NoMemory();
193 193 }
194 194
195 195 data = index_deref(self, pos);
196 196 if (data == NULL)
197 197 return NULL;
198 198
199 199 offset_flags = getbe32(data + 4);
200 200 if (pos == 0) /* mask out version number for the first entry */
201 201 offset_flags &= 0xFFFF;
202 202 else {
203 203 uint32_t offset_high = getbe32(data);
204 204 offset_flags |= ((uint64_t)offset_high) << 32;
205 205 }
206 206
207 207 comp_len = getbe32(data + 8);
208 208 uncomp_len = getbe32(data + 12);
209 209 base_rev = getbe32(data + 16);
210 210 link_rev = getbe32(data + 20);
211 211 parent_1 = getbe32(data + 24);
212 212 parent_2 = getbe32(data + 28);
213 213 c_node_id = data + 32;
214 214
215 215 entry = Py_BuildValue(tuple_format, offset_flags, comp_len,
216 216 uncomp_len, base_rev, link_rev,
217 217 parent_1, parent_2, c_node_id, 20);
218 218
219 219 if (entry) {
220 220 PyObject_GC_UnTrack(entry);
221 221 Py_INCREF(entry);
222 222 }
223 223
224 224 self->cache[pos] = entry;
225 225
226 226 return entry;
227 227 }
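/*
 * For reference, the tuple built by index_get() (and required by
 * index_append() below) is laid out as:
 *
 *     (offset_flags, comp_len, uncomp_len, base_rev, link_rev,
 *      parent_1, parent_2, nodeid)
 */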
228 228
229 229 /*
230 230 * Return the 20-byte SHA of the node corresponding to the given rev.
231 231 */
232 232 static const char *index_node(indexObject *self, Py_ssize_t pos)
233 233 {
234 234 Py_ssize_t length = index_length(self);
235 235 const char *data;
236 236
237 237 if (pos == -1)
238 238 return nullid;
239 239
240 240 if (pos >= length)
241 241 return NULL;
242 242
243 243 if (pos >= self->length) {
244 244 PyObject *tuple, *str;
245 245 tuple = PyList_GET_ITEM(self->added, pos - self->length);
246 246 str = PyTuple_GetItem(tuple, 7);
247 247 return str ? PyBytes_AS_STRING(str) : NULL;
248 248 }
249 249
250 250 data = index_deref(self, pos);
251 251 return data ? data + 32 : NULL;
252 252 }
253 253
254 254 /*
255 255 * Return the 20-byte SHA of the node corresponding to the given rev. The
256 256 * rev is assumed to exist. If it does not, an exception is set.
257 257 */
258 258 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
259 259 {
260 260 const char *node = index_node(self, pos);
261 261 if (node == NULL) {
262 262 PyErr_Format(PyExc_IndexError, "could not access rev %d",
263 263 (int)pos);
264 264 }
265 265 return node;
266 266 }
267 267
268 268 static int nt_insert(nodetree *self, const char *node, int rev);
269 269
270 270 static int node_check(PyObject *obj, char **node)
271 271 {
272 272 Py_ssize_t nodelen;
273 273 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
274 274 return -1;
275 275 if (nodelen == 20)
276 276 return 0;
277 277 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
278 278 return -1;
279 279 }
280 280
281 281 static PyObject *index_append(indexObject *self, PyObject *obj)
282 282 {
283 283 char *node;
284 284 Py_ssize_t len;
285 285
286 286 if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
287 287 PyErr_SetString(PyExc_TypeError, "8-tuple required");
288 288 return NULL;
289 289 }
290 290
291 291 if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
292 292 return NULL;
293 293
294 294 len = index_length(self);
295 295
296 296 if (self->added == NULL) {
297 297 self->added = PyList_New(0);
298 298 if (self->added == NULL)
299 299 return NULL;
300 300 }
301 301
302 302 if (PyList_Append(self->added, obj) == -1)
303 303 return NULL;
304 304
305 305 if (self->nt)
306 306 nt_insert(self->nt, node, len);
307 307
308 308 Py_CLEAR(self->headrevs);
309 309 Py_RETURN_NONE;
310 310 }
311 311
312 312 static PyObject *index_stats(indexObject *self)
313 313 {
314 314 PyObject *obj = PyDict_New();
315 315 PyObject *t = NULL;
316 316
317 317 if (obj == NULL)
318 318 return NULL;
319 319
320 320 #define istat(__n, __d) \
321 321 do { \
322 322 t = PyInt_FromSsize_t(self->__n); \
323 323 if (!t) \
324 324 goto bail; \
325 325 if (PyDict_SetItemString(obj, __d, t) == -1) \
326 326 goto bail; \
327 327 Py_DECREF(t); \
328 328 } while (0)
329 329
330 330 if (self->added) {
331 331 Py_ssize_t len = PyList_GET_SIZE(self->added);
332 332 t = PyInt_FromSsize_t(len);
333 333 if (!t)
334 334 goto bail;
335 335 if (PyDict_SetItemString(obj, "index entries added", t) == -1)
336 336 goto bail;
337 337 Py_DECREF(t);
338 338 }
339 339
340 340 if (self->raw_length != self->length)
341 341 istat(raw_length, "revs on disk");
342 342 istat(length, "revs in memory");
343 343 istat(ntlookups, "node trie lookups");
344 344 istat(ntmisses, "node trie misses");
345 345 istat(ntrev, "node trie last rev scanned");
346 346 if (self->nt) {
347 347 istat(nt->capacity, "node trie capacity");
348 348 istat(nt->depth, "node trie depth");
349 349 istat(nt->length, "node trie count");
350 350 istat(nt->splits, "node trie splits");
351 351 }
352 352
353 353 #undef istat
354 354
355 355 return obj;
356 356
357 357 bail:
358 358 Py_XDECREF(obj);
359 359 Py_XDECREF(t);
360 360 return NULL;
361 361 }
362 362
363 363 /*
364 364 * When we cache a list, we want to be sure the caller can't mutate
365 365 * the cached copy.
366 366 */
367 367 static PyObject *list_copy(PyObject *list)
368 368 {
369 369 Py_ssize_t len = PyList_GET_SIZE(list);
370 370 PyObject *newlist = PyList_New(len);
371 371 Py_ssize_t i;
372 372
373 373 if (newlist == NULL)
374 374 return NULL;
375 375
376 376 for (i = 0; i < len; i++) {
377 377 PyObject *obj = PyList_GET_ITEM(list, i);
378 378 Py_INCREF(obj);
379 379 PyList_SET_ITEM(newlist, i, obj);
380 380 }
381 381
382 382 return newlist;
383 383 }
384 384
385 385 static int check_filter(PyObject *filter, Py_ssize_t arg)
386 386 {
387 387 if (filter) {
388 388 PyObject *arglist, *result;
389 389 int isfiltered;
390 390
391 391 arglist = Py_BuildValue("(n)", arg);
392 392 if (!arglist) {
393 393 return -1;
394 394 }
395 395
396 396 result = PyEval_CallObject(filter, arglist);
397 397 Py_DECREF(arglist);
398 398 if (!result) {
399 399 return -1;
400 400 }
401 401
402 402 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
403 403 * same as this function, so we can just return it directly. */
404 404 isfiltered = PyObject_IsTrue(result);
405 405 Py_DECREF(result);
406 406 return isfiltered;
407 407 } else {
408 408 return 0;
409 409 }
410 410 }
411 411
412 412 static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
413 413 Py_ssize_t marker, char *phases)
414 414 {
415 415 PyObject *iter = NULL;
416 416 PyObject *iter_item = NULL;
417 417 Py_ssize_t min_idx = index_length(self) + 2;
418 418 long iter_item_long;
419 419
420 420 if (PyList_GET_SIZE(list) != 0) {
421 421 iter = PyObject_GetIter(list);
422 422 if (iter == NULL)
423 423 return -2;
424 424 while ((iter_item = PyIter_Next(iter))) {
425 425 iter_item_long = PyInt_AS_LONG(iter_item);
426 426 Py_DECREF(iter_item);
427 427 if (iter_item_long < min_idx)
428 428 min_idx = iter_item_long;
429 429 phases[iter_item_long] = marker;
430 430 }
431 431 Py_DECREF(iter);
432 432 }
433 433
434 434 return min_idx;
435 435 }
436 436
437 437 static inline void set_phase_from_parents(char *phases, int parent_1,
438 438 int parent_2, Py_ssize_t i)
439 439 {
440 440 if (parent_1 >= 0 && phases[parent_1] > phases[i])
441 441 phases[i] = phases[parent_1];
442 442 if (parent_2 >= 0 && phases[parent_2] > phases[i])
443 443 phases[i] = phases[parent_2];
444 444 }
445 445
446 446 static PyObject *reachableroots2(indexObject *self, PyObject *args)
447 447 {
448 448
449 449 /* Input */
450 450 long minroot;
451 451 PyObject *includepatharg = NULL;
452 452 int includepath = 0;
453 453 /* heads and roots are lists */
454 454 PyObject *heads = NULL;
455 455 PyObject *roots = NULL;
456 456 PyObject *reachable = NULL;
457 457
458 458 PyObject *val;
459 459 Py_ssize_t len = index_length(self);
460 460 long revnum;
461 461 Py_ssize_t k;
462 462 Py_ssize_t i;
463 463 Py_ssize_t l;
464 464 int r;
465 465 int parents[2];
466 466
467 467 /* Internal data structure:
468 468 * tovisit: array of length len+1 (all revs + nullrev), filled up to lentovisit
469 469 * revstates: array of length len+1 (all revs + nullrev), indexed by rev + 1 so that nullrev maps to slot 0 */
470 470 int *tovisit = NULL;
471 471 long lentovisit = 0;
472 472 enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
473 473 char *revstates = NULL;
474 474
475 475 /* Get arguments */
476 476 if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
477 477 &PyList_Type, &roots,
478 478 &PyBool_Type, &includepatharg))
479 479 goto bail;
480 480
481 481 if (includepatharg == Py_True)
482 482 includepath = 1;
483 483
484 484 /* Initialize return set */
485 485 reachable = PyList_New(0);
486 486 if (reachable == NULL)
487 487 goto bail;
488 488
489 489 /* Initialize internal datastructures */
490 490 tovisit = (int *)malloc((len + 1) * sizeof(int));
491 491 if (tovisit == NULL) {
492 492 PyErr_NoMemory();
493 493 goto bail;
494 494 }
495 495
496 496 revstates = (char *)calloc(len + 1, 1);
497 497 if (revstates == NULL) {
498 498 PyErr_NoMemory();
499 499 goto bail;
500 500 }
501 501
502 502 l = PyList_GET_SIZE(roots);
503 503 for (i = 0; i < l; i++) {
504 504 revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
505 505 if (revnum == -1 && PyErr_Occurred())
506 506 goto bail;
507 507 /* If root is out of range, e.g. wdir(), it must be unreachable
508 508 * from heads. So we can just ignore it. */
509 509 if (revnum + 1 < 0 || revnum + 1 >= len + 1)
510 510 continue;
511 511 revstates[revnum + 1] |= RS_ROOT;
512 512 }
513 513
514 514 /* Populate tovisit with all the heads */
515 515 l = PyList_GET_SIZE(heads);
516 516 for (i = 0; i < l; i++) {
517 517 revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
518 518 if (revnum == -1 && PyErr_Occurred())
519 519 goto bail;
520 520 if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
521 521 PyErr_SetString(PyExc_IndexError, "head out of range");
522 522 goto bail;
523 523 }
524 524 if (!(revstates[revnum + 1] & RS_SEEN)) {
525 525 tovisit[lentovisit++] = (int)revnum;
526 526 revstates[revnum + 1] |= RS_SEEN;
527 527 }
528 528 }
529 529
530 530 /* Visit the tovisit list and find the reachable roots */
531 531 k = 0;
532 532 while (k < lentovisit) {
533 533 /* Add the node to reachable if it is a root */
534 534 revnum = tovisit[k++];
535 535 if (revstates[revnum + 1] & RS_ROOT) {
536 536 revstates[revnum + 1] |= RS_REACHABLE;
537 537 val = PyInt_FromLong(revnum);
538 538 if (val == NULL)
539 539 goto bail;
540 540 r = PyList_Append(reachable, val);
541 541 Py_DECREF(val);
542 542 if (r < 0)
543 543 goto bail;
544 544 if (includepath == 0)
545 545 continue;
546 546 }
547 547
548 548 /* Add its parents to the list of nodes to visit */
549 549 if (revnum == -1)
550 550 continue;
551 551 r = index_get_parents(self, revnum, parents, (int)len - 1);
552 552 if (r < 0)
553 553 goto bail;
554 554 for (i = 0; i < 2; i++) {
555 555 if (!(revstates[parents[i] + 1] & RS_SEEN)
556 556 && parents[i] >= minroot) {
557 557 tovisit[lentovisit++] = parents[i];
558 558 revstates[parents[i] + 1] |= RS_SEEN;
559 559 }
560 560 }
561 561 }
562 562
563 563 /* Find all the nodes in between the roots we found and the heads
564 564 * and add them to the reachable set */
565 565 if (includepath == 1) {
566 566 long minidx = minroot;
567 567 if (minidx < 0)
568 568 minidx = 0;
569 569 for (i = minidx; i < len; i++) {
570 570 if (!(revstates[i + 1] & RS_SEEN))
571 571 continue;
572 572 r = index_get_parents(self, i, parents, (int)len - 1);
573 573 /* Corrupted index file, error is set from
574 574 * index_get_parents */
575 575 if (r < 0)
576 576 goto bail;
577 577 if (((revstates[parents[0] + 1] |
578 578 revstates[parents[1] + 1]) & RS_REACHABLE)
579 579 && !(revstates[i + 1] & RS_REACHABLE)) {
580 580 revstates[i + 1] |= RS_REACHABLE;
581 581 val = PyInt_FromLong(i);
582 582 if (val == NULL)
583 583 goto bail;
584 584 r = PyList_Append(reachable, val);
585 585 Py_DECREF(val);
586 586 if (r < 0)
587 587 goto bail;
588 588 }
589 589 }
590 590 }
591 591
592 592 free(revstates);
593 593 free(tovisit);
594 594 return reachable;
595 595 bail:
596 596 Py_XDECREF(reachable);
597 597 free(revstates);
598 598 free(tovisit);
599 599 return NULL;
600 600 }
601 601
602 602 static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
603 603 {
604 604 PyObject *roots = Py_None;
605 605 PyObject *ret = NULL;
606 606 PyObject *phasessize = NULL;
607 607 PyObject *phaseroots = NULL;
608 608 PyObject *phaseset = NULL;
609 609 PyObject *phasessetlist = NULL;
610 610 PyObject *rev = NULL;
611 611 Py_ssize_t len = index_length(self);
612 612 Py_ssize_t numphase = 0;
613 613 Py_ssize_t minrevallphases = 0;
614 614 Py_ssize_t minrevphase = 0;
615 615 Py_ssize_t i = 0;
616 616 char *phases = NULL;
617 617 long phase;
618 618
619 619 if (!PyArg_ParseTuple(args, "O", &roots))
620 620 goto done;
621 621 if (roots == NULL || !PyList_Check(roots)) {
622 622 PyErr_SetString(PyExc_TypeError, "roots must be a list");
623 623 goto done;
624 624 }
625 625
626 626 phases = calloc(len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
627 627 if (phases == NULL) {
628 628 PyErr_NoMemory();
629 629 goto done;
630 630 }
631 631 /* Put the phase information of all the roots in phases */
632 632 numphase = PyList_GET_SIZE(roots)+1;
633 633 minrevallphases = len + 1;
634 634 phasessetlist = PyList_New(numphase);
635 635 if (phasessetlist == NULL)
636 636 goto done;
637 637
638 638 PyList_SET_ITEM(phasessetlist, 0, Py_None);
639 639 Py_INCREF(Py_None);
640 640
641 641 for (i = 0; i < numphase-1; i++) {
642 642 phaseroots = PyList_GET_ITEM(roots, i);
643 643 phaseset = PySet_New(NULL);
644 644 if (phaseset == NULL)
645 645 goto release;
646 646 PyList_SET_ITEM(phasessetlist, i+1, phaseset);
647 647 if (!PyList_Check(phaseroots)) {
648 648 PyErr_SetString(PyExc_TypeError,
649 649 "roots item must be a list");
650 650 goto release;
651 651 }
652 652 minrevphase = add_roots_get_min(self, phaseroots, i+1, phases);
653 653 if (minrevphase == -2) /* Error from add_roots_get_min */
654 654 goto release;
655 655 minrevallphases = MIN(minrevallphases, minrevphase);
656 656 }
657 657 /* Propagate the phase information from the roots to the revs */
658 658 if (minrevallphases != -1) {
659 659 int parents[2];
660 660 for (i = minrevallphases; i < len; i++) {
661 661 if (index_get_parents(self, i, parents,
662 662 (int)len - 1) < 0)
663 663 goto release;
664 664 set_phase_from_parents(phases, parents[0], parents[1], i);
665 665 }
666 666 }
667 667 /* Transform phase list to a python list */
668 668 phasessize = PyInt_FromLong(len);
669 669 if (phasessize == NULL)
670 670 goto release;
671 671 for (i = 0; i < len; i++) {
672 672 phase = phases[i];
673 673 /* We only store the sets of revs for non-public phases; the public phase
674 674 * is computed as the difference */
675 675 if (phase != 0) {
676 676 phaseset = PyList_GET_ITEM(phasessetlist, phase);
677 677 rev = PyInt_FromLong(i);
678 678 if (rev == NULL)
679 679 goto release;
680 680 PySet_Add(phaseset, rev);
681 681 Py_XDECREF(rev);
682 682 }
683 683 }
684 684 ret = PyTuple_Pack(2, phasessize, phasessetlist);
685 685
686 686 release:
687 687 Py_XDECREF(phasessize);
688 688 Py_XDECREF(phasessetlist);
689 689 done:
690 690 free(phases);
691 691 return ret;
692 692 }
693 693
694 694 static PyObject *index_headrevs(indexObject *self, PyObject *args)
695 695 {
696 696 Py_ssize_t i, j, len;
697 697 char *nothead = NULL;
698 698 PyObject *heads = NULL;
699 699 PyObject *filter = NULL;
700 700 PyObject *filteredrevs = Py_None;
701 701
702 702 if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
703 703 return NULL;
704 704 }
705 705
706 706 if (self->headrevs && filteredrevs == self->filteredrevs)
707 707 return list_copy(self->headrevs);
708 708
709 709 Py_DECREF(self->filteredrevs);
710 710 self->filteredrevs = filteredrevs;
711 711 Py_INCREF(filteredrevs);
712 712
713 713 if (filteredrevs != Py_None) {
714 714 filter = PyObject_GetAttrString(filteredrevs, "__contains__");
715 715 if (!filter) {
716 716 PyErr_SetString(PyExc_TypeError,
717 717 "filteredrevs has no attribute __contains__");
718 718 goto bail;
719 719 }
720 720 }
721 721
722 722 len = index_length(self);
723 723 heads = PyList_New(0);
724 724 if (heads == NULL)
725 725 goto bail;
726 726 if (len == 0) {
727 727 PyObject *nullid = PyInt_FromLong(-1);
728 728 if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
729 729 Py_XDECREF(nullid);
730 730 goto bail;
731 731 }
732 732 goto done;
733 733 }
734 734
735 735 nothead = calloc(len, 1);
736 736 if (nothead == NULL) {
737 737 PyErr_NoMemory();
738 738 goto bail;
739 739 }
740 740
741 741 for (i = len - 1; i >= 0; i--) {
742 742 int isfiltered;
743 743 int parents[2];
744 744
745 745 /* If nothead[i] == 1, it means we've seen an unfiltered child of this
746 746 * node already, and therefore this node is not filtered. So we can skip
747 747 * the expensive check_filter step.
748 748 */
749 749 if (nothead[i] != 1) {
750 750 isfiltered = check_filter(filter, i);
751 751 if (isfiltered == -1) {
752 752 PyErr_SetString(PyExc_TypeError,
753 753 "unable to check filter");
754 754 goto bail;
755 755 }
756 756
757 757 if (isfiltered) {
758 758 nothead[i] = 1;
759 759 continue;
760 760 }
761 761 }
762 762
763 763 if (index_get_parents(self, i, parents, (int)len - 1) < 0)
764 764 goto bail;
765 765 for (j = 0; j < 2; j++) {
766 766 if (parents[j] >= 0)
767 767 nothead[parents[j]] = 1;
768 768 }
769 769 }
770 770
771 771 for (i = 0; i < len; i++) {
772 772 PyObject *head;
773 773
774 774 if (nothead[i])
775 775 continue;
776 776 head = PyInt_FromSsize_t(i);
777 777 if (head == NULL || PyList_Append(heads, head) == -1) {
778 778 Py_XDECREF(head);
779 779 goto bail;
780 780 }
781 781 }
782 782
783 783 done:
784 784 self->headrevs = heads;
785 785 Py_XDECREF(filter);
786 786 free(nothead);
787 787 return list_copy(self->headrevs);
788 788 bail:
789 789 Py_XDECREF(filter);
790 790 Py_XDECREF(heads);
791 791 free(nothead);
792 792 return NULL;
793 793 }
794 794
795 795 /**
796 796 * Obtain the base revision index entry.
797 797 *
798 798 * Callers must ensure that rev >= 0 or illegal memory access may occur.
799 799 */
800 800 static inline int index_baserev(indexObject *self, int rev)
801 801 {
802 802 const char *data;
803 803
804 804 if (rev >= self->length) {
805 805 PyObject *tuple = PyList_GET_ITEM(self->added, rev - self->length);
806 806 return (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 3));
807 807 }
808 808 else {
809 809 data = index_deref(self, rev);
810 810 if (data == NULL) {
811 811 return -2;
812 812 }
813 813
814 814 return getbe32(data + 16);
815 815 }
816 816 }
817 817
818 818 static PyObject *index_deltachain(indexObject *self, PyObject *args)
819 819 {
820 820 int rev, generaldelta;
821 821 PyObject *stoparg;
822 822 int stoprev, iterrev, baserev = -1;
823 823 int stopped;
824 824 PyObject *chain = NULL, *result = NULL;
825 825 const Py_ssize_t length = index_length(self);
826 826
827 827 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
828 828 return NULL;
829 829 }
830 830
831 831 if (PyInt_Check(stoparg)) {
832 832 stoprev = (int)PyInt_AsLong(stoparg);
833 833 if (stoprev == -1 && PyErr_Occurred()) {
834 834 return NULL;
835 835 }
836 836 }
837 837 else if (stoparg == Py_None) {
838 838 stoprev = -2;
839 839 }
840 840 else {
841 841 PyErr_SetString(PyExc_ValueError,
842 842 "stoprev must be integer or None");
843 843 return NULL;
844 844 }
845 845
846 846 if (rev < 0 || rev >= length) {
847 847 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
848 848 return NULL;
849 849 }
850 850
851 851 chain = PyList_New(0);
852 852 if (chain == NULL) {
853 853 return NULL;
854 854 }
855 855
856 856 baserev = index_baserev(self, rev);
857 857
858 858 /* This should never happen. */
859 859 if (baserev <= -2) {
860 860 /* Error should be set by index_deref() */
861 861 assert(PyErr_Occurred());
862 862 goto bail;
863 863 }
864 864
865 865 iterrev = rev;
866 866
867 867 while (iterrev != baserev && iterrev != stoprev) {
868 868 PyObject *value = PyInt_FromLong(iterrev);
869 869 if (value == NULL) {
870 870 goto bail;
871 871 }
872 872 if (PyList_Append(chain, value)) {
873 873 Py_DECREF(value);
874 874 goto bail;
875 875 }
876 876 Py_DECREF(value);
877 877
878 878 if (generaldelta) {
879 879 iterrev = baserev;
880 880 }
881 881 else {
882 882 iterrev--;
883 883 }
884 884
885 885 if (iterrev < 0) {
886 886 break;
887 887 }
888 888
889 889 if (iterrev >= length) {
890 890 PyErr_SetString(PyExc_IndexError, "revision outside index");
891 891 return NULL;
892 892 }
893 893
894 894 baserev = index_baserev(self, iterrev);
895 895
896 896 /* This should never happen. */
897 897 if (baserev <= -2) {
898 898 /* Error should be set by index_deref() */
899 899 assert(PyErr_Occurred());
900 900 goto bail;
901 901 }
902 902 }
903 903
904 904 if (iterrev == stoprev) {
905 905 stopped = 1;
906 906 }
907 907 else {
908 908 PyObject *value = PyInt_FromLong(iterrev);
909 909 if (value == NULL) {
910 910 goto bail;
911 911 }
912 912 if (PyList_Append(chain, value)) {
913 913 Py_DECREF(value);
914 914 goto bail;
915 915 }
916 916 Py_DECREF(value);
917 917
918 918 stopped = 0;
919 919 }
920 920
921 921 if (PyList_Reverse(chain)) {
922 922 goto bail;
923 923 }
924 924
925 925 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
926 926 Py_DECREF(chain);
927 927 return result;
928 928
929 929 bail:
930 930 Py_DECREF(chain);
931 931 return NULL;
932 932 }
933 933
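/*
 * nt_level() returns the nybble of a binary node id used at the given trie
 * level: even levels read the high four bits of byte level/2, odd levels
 * the low four bits. For example, if a node starts with byte 0xab, level 0
 * yields 0xa and level 1 yields 0xb.
 */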
934 934 static inline int nt_level(const char *node, Py_ssize_t level)
935 935 {
936 936 int v = node[level>>1];
937 937 if (!(level & 1))
938 938 v >>= 4;
939 939 return v & 0xf;
940 940 }
941 941
942 942 /*
943 943 * Return values:
944 944 *
945 945 * -4: match is ambiguous (multiple candidates)
946 946 * -2: not found
947 947 * rest: valid rev
948 948 */
949 949 static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
950 950 int hex)
951 951 {
952 952 int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
953 953 int level, maxlevel, off;
954 954
955 955 if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
956 956 return -1;
957 957
958 958 if (hex)
959 959 maxlevel = nodelen > 40 ? 40 : (int)nodelen;
960 960 else
961 961 maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);
962 962
963 963 for (level = off = 0; level < maxlevel; level++) {
964 964 int k = getnybble(node, level);
965 965 nodetreenode *n = &self->nodes[off];
966 966 int v = n->children[k];
967 967
968 968 if (v < 0) {
969 969 const char *n;
970 970 Py_ssize_t i;
971 971
972 972 v = -(v + 2);
973 973 n = index_node(self->index, v);
974 974 if (n == NULL)
975 975 return -2;
976 976 for (i = level; i < maxlevel; i++)
977 977 if (getnybble(node, i) != nt_level(n, i))
978 978 return -2;
979 979 return v;
980 980 }
981 981 if (v == 0)
982 982 return -2;
983 983 off = v;
984 984 }
985 985 /* multiple matches against an ambiguous prefix */
986 986 return -4;
987 987 }
988 988
989 989 static int nt_new(nodetree *self)
990 990 {
991 991 if (self->length == self->capacity) {
992 992 unsigned newcapacity;
993 993 nodetreenode *newnodes;
994 if (self->capacity >= INT_MAX / (sizeof(nodetreenode) * 2)) {
995 PyErr_SetString(PyExc_MemoryError,
996 "overflow in nt_new");
994 newcapacity = self->capacity * 2;
995 if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
996 PyErr_SetString(PyExc_MemoryError, "overflow in nt_new");
997 997 return -1;
998 998 }
999 newcapacity = self->capacity * 2;
1000 999 newnodes = realloc(self->nodes, newcapacity * sizeof(nodetreenode));
1001 1000 if (newnodes == NULL) {
1002 1001 PyErr_SetString(PyExc_MemoryError, "out of memory");
1003 1002 return -1;
1004 1003 }
1005 1004 self->capacity = newcapacity;
1006 1005 self->nodes = newnodes;
1007 1006 memset(&self->nodes[self->length], 0,
1008 1007 sizeof(nodetreenode) * (self->capacity - self->length));
1009 1008 }
1010 1009 return self->length++;
1011 1010 }
1012 1011
1013 1012 static int nt_insert(nodetree *self, const char *node, int rev)
1014 1013 {
1015 1014 int level = 0;
1016 1015 int off = 0;
1017 1016
1018 1017 while (level < 40) {
1019 1018 int k = nt_level(node, level);
1020 1019 nodetreenode *n;
1021 1020 int v;
1022 1021
1023 1022 n = &self->nodes[off];
1024 1023 v = n->children[k];
1025 1024
1026 1025 if (v == 0) {
1027 1026 n->children[k] = -rev - 2;
1028 1027 return 0;
1029 1028 }
1030 1029 if (v < 0) {
1031 1030 const char *oldnode = index_node_existing(self->index, -(v + 2));
1032 1031 int noff;
1033 1032
1034 1033 if (oldnode == NULL)
1035 1034 return -1;
1036 1035 if (!memcmp(oldnode, node, 20)) {
1037 1036 n->children[k] = -rev - 2;
1038 1037 return 0;
1039 1038 }
1040 1039 noff = nt_new(self);
1041 1040 if (noff == -1)
1042 1041 return -1;
1043 1042 /* self->nodes may have been changed by realloc */
1044 1043 self->nodes[off].children[k] = noff;
1045 1044 off = noff;
1046 1045 n = &self->nodes[off];
1047 1046 n->children[nt_level(oldnode, ++level)] = v;
1048 1047 if (level > self->depth)
1049 1048 self->depth = level;
1050 1049 self->splits += 1;
1051 1050 } else {
1052 1051 level += 1;
1053 1052 off = v;
1054 1053 }
1055 1054 }
1056 1055
1057 1056 return -1;
1058 1057 }
1059 1058
1060 1059 static int nt_delete_node(nodetree *self, const char *node)
1061 1060 {
1062 1061 /* rev==-2 happens to get encoded as 0, which is interpreted as not set */
1063 1062 return nt_insert(self, node, -2);
1064 1063 }
1065 1064
1066 1065 static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
1067 1066 {
1068 1067 self->index = index;
1069 1068 self->capacity = capacity;
1070 1069 self->depth = 0;
1071 1070 self->splits = 0;
1072 1071 if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
1073 1072 PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
1074 1073 return -1;
1075 1074 }
1076 1075 self->nodes = calloc(self->capacity, sizeof(nodetreenode));
1077 1076 if (self->nodes == NULL) {
1078 1077 PyErr_NoMemory();
1079 1078 return -1;
1080 1079 }
1081 1080 self->length = 1;
1082 1081 return 0;
1083 1082 }
1084 1083
1085 1084 static int nt_partialmatch(nodetree *self, const char *node,
1086 1085 Py_ssize_t nodelen)
1087 1086 {
1088 1087 return nt_find(self, node, nodelen, 1);
1089 1088 }
1090 1089
1091 1090 /*
1092 1091 * Find the length of the shortest unique prefix of node.
1093 1092 *
1094 1093 * Return values:
1095 1094 *
1096 1095 * -3: error (exception set)
1097 1096 * -2: not found (no exception set)
1098 1097 * rest: length of shortest prefix
1099 1098 */
1100 1099 static int nt_shortest(nodetree *self, const char *node)
1101 1100 {
1102 1101 int level, off;
1103 1102
1104 1103 for (level = off = 0; level < 40; level++) {
1105 1104 int k, v;
1106 1105 nodetreenode *n = &self->nodes[off];
1107 1106 k = nt_level(node, level);
1108 1107 v = n->children[k];
1109 1108 if (v < 0) {
1110 1109 const char *n;
1111 1110 v = -(v + 2);
1112 1111 n = index_node_existing(self->index, v);
1113 1112 if (n == NULL)
1114 1113 return -3;
1115 1114 if (memcmp(node, n, 20) != 0)
1116 1115 /*
1117 1116 * Found a unique prefix, but it wasn't for the
1118 1117 * requested node (i.e. the requested node does
1119 1118 * not exist).
1120 1119 */
1121 1120 return -2;
1122 1121 return level + 1;
1123 1122 }
1124 1123 if (v == 0)
1125 1124 return -2;
1126 1125 off = v;
1127 1126 }
1128 1127 /*
1129 1128 * A node is always unique once all 40 hex digits have been examined,
1130 1129 * so this point should be unreachable. If we do get here, a programming
1131 1130 * error in this file made us insert a node longer than 40 hex digits.
1132 1131 */
1133 1132 PyErr_SetString(PyExc_Exception, "broken node tree");
1134 1133 return -3;
1135 1134 }
1136 1135
1137 1136 static int index_init_nt(indexObject *self)
1138 1137 {
1139 1138 if (self->nt == NULL) {
1140 1139 self->nt = PyMem_Malloc(sizeof(nodetree));
1141 1140 if (self->nt == NULL) {
1142 1141 PyErr_NoMemory();
1143 1142 return -1;
1144 1143 }
1145 1144 unsigned capacity = (self->raw_length < 4 ? 4 : (int)self->raw_length / 2);
1146 1145 if (nt_init(self->nt, self, capacity) == -1) {
1147 1146 PyMem_Free(self->nt);
1148 1147 self->nt = NULL;
1149 1148 return -1;
1150 1149 }
1151 1150 if (nt_insert(self->nt, nullid, -1) == -1) {
1152 1151 PyMem_Free(self->nt);
1153 1152 self->nt = NULL;
1154 1153 return -1;
1155 1154 }
1156 1155 self->ntrev = (int)index_length(self);
1157 1156 self->ntlookups = 1;
1158 1157 self->ntmisses = 0;
1159 1158 }
1160 1159 return 0;
1161 1160 }
1162 1161
1163 1162 /*
1164 1163 * Return values:
1165 1164 *
1166 1165 * -3: error (exception set)
1167 1166 * -2: not found (no exception set)
1168 1167 * rest: valid rev
1169 1168 */
1170 1169 static int index_find_node(indexObject *self,
1171 1170 const char *node, Py_ssize_t nodelen)
1172 1171 {
1173 1172 int rev;
1174 1173
1175 1174 if (index_init_nt(self) == -1)
1176 1175 return -3;
1177 1176
1178 1177 self->ntlookups++;
1179 1178 rev = nt_find(self->nt, node, nodelen, 0);
1180 1179 if (rev >= -1)
1181 1180 return rev;
1182 1181
1183 1182 /*
1184 1183 * For the first handful of lookups, we scan the entire index,
1185 1184 * and cache only the matching nodes. This optimizes for cases
1186 1185 * like "hg tip", where only a few nodes are accessed.
1187 1186 *
1188 1187 * After that, we cache every node we visit, using a single
1189 1188 * scan amortized over multiple lookups. This gives the best
1190 1189 * bulk performance, e.g. for "hg log".
1191 1190 */
1192 1191 if (self->ntmisses++ < 4) {
1193 1192 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1194 1193 const char *n = index_node_existing(self, rev);
1195 1194 if (n == NULL)
1196 1195 return -3;
1197 1196 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1198 1197 if (nt_insert(self->nt, n, rev) == -1)
1199 1198 return -3;
1200 1199 break;
1201 1200 }
1202 1201 }
1203 1202 } else {
1204 1203 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1205 1204 const char *n = index_node_existing(self, rev);
1206 1205 if (n == NULL)
1207 1206 return -3;
1208 1207 if (nt_insert(self->nt, n, rev) == -1) {
1209 1208 self->ntrev = rev + 1;
1210 1209 return -3;
1211 1210 }
1212 1211 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1213 1212 break;
1214 1213 }
1215 1214 }
1216 1215 self->ntrev = rev;
1217 1216 }
1218 1217
1219 1218 if (rev >= 0)
1220 1219 return rev;
1221 1220 return -2;
1222 1221 }
1223 1222
1224 1223 static void raise_revlog_error(void)
1225 1224 {
1226 1225 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
1227 1226
1228 1227 mod = PyImport_ImportModule("mercurial.error");
1229 1228 if (mod == NULL) {
1230 1229 goto cleanup;
1231 1230 }
1232 1231
1233 1232 dict = PyModule_GetDict(mod);
1234 1233 if (dict == NULL) {
1235 1234 goto cleanup;
1236 1235 }
1237 1236 Py_INCREF(dict);
1238 1237
1239 1238 errclass = PyDict_GetItemString(dict, "RevlogError");
1240 1239 if (errclass == NULL) {
1241 1240 PyErr_SetString(PyExc_SystemError,
1242 1241 "could not find RevlogError");
1243 1242 goto cleanup;
1244 1243 }
1245 1244
1246 1245 /* value of exception is ignored by callers */
1247 1246 PyErr_SetString(errclass, "RevlogError");
1248 1247
1249 1248 cleanup:
1250 1249 Py_XDECREF(dict);
1251 1250 Py_XDECREF(mod);
1252 1251 }
1253 1252
1254 1253 static PyObject *index_getitem(indexObject *self, PyObject *value)
1255 1254 {
1256 1255 char *node;
1257 1256 int rev;
1258 1257
1259 1258 if (PyInt_Check(value))
1260 1259 return index_get(self, PyInt_AS_LONG(value));
1261 1260
1262 1261 if (node_check(value, &node) == -1)
1263 1262 return NULL;
1264 1263 rev = index_find_node(self, node, 20);
1265 1264 if (rev >= -1)
1266 1265 return PyInt_FromLong(rev);
1267 1266 if (rev == -2)
1268 1267 raise_revlog_error();
1269 1268 return NULL;
1270 1269 }
1271 1270
1272 1271 /*
1273 1272 * Fully populate the radix tree.
1274 1273 */
1275 1274 static int index_populate_nt(indexObject *self) {
1276 1275 int rev;
1277 1276 if (self->ntrev > 0) {
1278 1277 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1279 1278 const char *n = index_node_existing(self, rev);
1280 1279 if (n == NULL)
1281 1280 return -1;
1282 1281 if (nt_insert(self->nt, n, rev) == -1)
1283 1282 return -1;
1284 1283 }
1285 1284 self->ntrev = -1;
1286 1285 }
1287 1286 return 0;
1288 1287 }
1289 1288
1290 1289 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
1291 1290 {
1292 1291 const char *fullnode;
1293 1292 int nodelen;
1294 1293 char *node;
1295 1294 int rev, i;
1296 1295
1297 1296 if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
1298 1297 return NULL;
1299 1298
1300 1299 if (nodelen < 1) {
1301 1300 PyErr_SetString(PyExc_ValueError, "key too short");
1302 1301 return NULL;
1303 1302 }
1304 1303
1305 1304 if (nodelen > 40) {
1306 1305 PyErr_SetString(PyExc_ValueError, "key too long");
1307 1306 return NULL;
1308 1307 }
1309 1308
1310 1309 for (i = 0; i < nodelen; i++)
1311 1310 hexdigit(node, i);
1312 1311 if (PyErr_Occurred()) {
1313 1312 /* input contains non-hex characters */
1314 1313 PyErr_Clear();
1315 1314 Py_RETURN_NONE;
1316 1315 }
1317 1316
1318 1317 if (index_init_nt(self) == -1)
1319 1318 return NULL;
1320 1319 if (index_populate_nt(self) == -1)
1321 1320 return NULL;
1322 1321 rev = nt_partialmatch(self->nt, node, nodelen);
1323 1322
1324 1323 switch (rev) {
1325 1324 case -4:
1326 1325 raise_revlog_error();
1327 1326 return NULL;
1328 1327 case -2:
1329 1328 Py_RETURN_NONE;
1330 1329 case -1:
1331 1330 return PyBytes_FromStringAndSize(nullid, 20);
1332 1331 }
1333 1332
1334 1333 fullnode = index_node_existing(self, rev);
1335 1334 if (fullnode == NULL) {
1336 1335 return NULL;
1337 1336 }
1338 1337 return PyBytes_FromStringAndSize(fullnode, 20);
1339 1338 }
1340 1339
1341 1340 static PyObject *index_shortest(indexObject *self, PyObject *args)
1342 1341 {
1343 1342 PyObject *val;
1344 1343 char *node;
1345 1344 int length;
1346 1345
1347 1346 if (!PyArg_ParseTuple(args, "O", &val))
1348 1347 return NULL;
1349 1348 if (node_check(val, &node) == -1)
1350 1349 return NULL;
1351 1350
1352 1351 self->ntlookups++;
1353 1352 if (index_init_nt(self) == -1)
1354 1353 return NULL;
1355 1354 if (index_populate_nt(self) == -1)
1356 1355 return NULL;
1357 1356 length = nt_shortest(self->nt, node);
1358 1357 if (length == -3)
1359 1358 return NULL;
1360 1359 if (length == -2) {
1361 1360 raise_revlog_error();
1362 1361 return NULL;
1363 1362 }
1364 1363 return PyInt_FromLong(length);
1365 1364 }
1366 1365
1367 1366 static PyObject *index_m_get(indexObject *self, PyObject *args)
1368 1367 {
1369 1368 PyObject *val;
1370 1369 char *node;
1371 1370 int rev;
1372 1371
1373 1372 if (!PyArg_ParseTuple(args, "O", &val))
1374 1373 return NULL;
1375 1374 if (node_check(val, &node) == -1)
1376 1375 return NULL;
1377 1376 rev = index_find_node(self, node, 20);
1378 1377 if (rev == -3)
1379 1378 return NULL;
1380 1379 if (rev == -2)
1381 1380 Py_RETURN_NONE;
1382 1381 return PyInt_FromLong(rev);
1383 1382 }
1384 1383
1385 1384 static int index_contains(indexObject *self, PyObject *value)
1386 1385 {
1387 1386 char *node;
1388 1387
1389 1388 if (PyInt_Check(value)) {
1390 1389 long rev = PyInt_AS_LONG(value);
1391 1390 return rev >= -1 && rev < index_length(self);
1392 1391 }
1393 1392
1394 1393 if (node_check(value, &node) == -1)
1395 1394 return -1;
1396 1395
1397 1396 switch (index_find_node(self, node, 20)) {
1398 1397 case -3:
1399 1398 return -1;
1400 1399 case -2:
1401 1400 return 0;
1402 1401 default:
1403 1402 return 1;
1404 1403 }
1405 1404 }
1406 1405
1407 1406 typedef uint64_t bitmask;
1408 1407
1409 1408 /*
1410 1409 * Given a disjoint set of revs, return all candidates for the
1411 1410 * greatest common ancestor. In revset notation, this is the set
1412 1411 * "heads(::a and ::b and ...)"
1413 1412 */
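/*
 * Sketch of the scheme used below: each input rev is assigned one bit, and
 * seen[v] accumulates the bits of every input that v is an ancestor of. A
 * rev whose mask equals "allseen" is an ancestor of all inputs and is
 * appended to the result; its mask is then poisoned so that its own
 * ancestors (which cannot be heads of the common-ancestor set) are skipped.
 */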
1414 1413 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
1415 1414 int revcount)
1416 1415 {
1417 1416 const bitmask allseen = (1ull << revcount) - 1;
1418 1417 const bitmask poison = 1ull << revcount;
1419 1418 PyObject *gca = PyList_New(0);
1420 1419 int i, v, interesting;
1421 1420 int maxrev = -1;
1422 1421 bitmask sp;
1423 1422 bitmask *seen;
1424 1423
1425 1424 if (gca == NULL)
1426 1425 return PyErr_NoMemory();
1427 1426
1428 1427 for (i = 0; i < revcount; i++) {
1429 1428 if (revs[i] > maxrev)
1430 1429 maxrev = revs[i];
1431 1430 }
1432 1431
1433 1432 seen = calloc(sizeof(*seen), maxrev + 1);
1434 1433 if (seen == NULL) {
1435 1434 Py_DECREF(gca);
1436 1435 return PyErr_NoMemory();
1437 1436 }
1438 1437
1439 1438 for (i = 0; i < revcount; i++)
1440 1439 seen[revs[i]] = 1ull << i;
1441 1440
1442 1441 interesting = revcount;
1443 1442
1444 1443 for (v = maxrev; v >= 0 && interesting; v--) {
1445 1444 bitmask sv = seen[v];
1446 1445 int parents[2];
1447 1446
1448 1447 if (!sv)
1449 1448 continue;
1450 1449
1451 1450 if (sv < poison) {
1452 1451 interesting -= 1;
1453 1452 if (sv == allseen) {
1454 1453 PyObject *obj = PyInt_FromLong(v);
1455 1454 if (obj == NULL)
1456 1455 goto bail;
1457 1456 if (PyList_Append(gca, obj) == -1) {
1458 1457 Py_DECREF(obj);
1459 1458 goto bail;
1460 1459 }
1461 1460 sv |= poison;
1462 1461 for (i = 0; i < revcount; i++) {
1463 1462 if (revs[i] == v)
1464 1463 goto done;
1465 1464 }
1466 1465 }
1467 1466 }
1468 1467 if (index_get_parents(self, v, parents, maxrev) < 0)
1469 1468 goto bail;
1470 1469
1471 1470 for (i = 0; i < 2; i++) {
1472 1471 int p = parents[i];
1473 1472 if (p == -1)
1474 1473 continue;
1475 1474 sp = seen[p];
1476 1475 if (sv < poison) {
1477 1476 if (sp == 0) {
1478 1477 seen[p] = sv;
1479 1478 interesting++;
1480 1479 }
1481 1480 else if (sp != sv)
1482 1481 seen[p] |= sv;
1483 1482 } else {
1484 1483 if (sp && sp < poison)
1485 1484 interesting--;
1486 1485 seen[p] = sv;
1487 1486 }
1488 1487 }
1489 1488 }
1490 1489
1491 1490 done:
1492 1491 free(seen);
1493 1492 return gca;
1494 1493 bail:
1495 1494 free(seen);
1496 1495 Py_XDECREF(gca);
1497 1496 return NULL;
1498 1497 }
1499 1498
1500 1499 /*
1501 1500 * Given a disjoint set of revs, return the subset with the longest
1502 1501 * path to the root.
1503 1502 */
1504 1503 static PyObject *find_deepest(indexObject *self, PyObject *revs)
1505 1504 {
1506 1505 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
1507 1506 static const Py_ssize_t capacity = 24;
1508 1507 int *depth, *interesting = NULL;
1509 1508 int i, j, v, ninteresting;
1510 1509 PyObject *dict = NULL, *keys = NULL;
1511 1510 long *seen = NULL;
1512 1511 int maxrev = -1;
1513 1512 long final;
1514 1513
1515 1514 if (revcount > capacity) {
1516 1515 PyErr_Format(PyExc_OverflowError,
1517 1516 "bitset size (%ld) > capacity (%ld)",
1518 1517 (long)revcount, (long)capacity);
1519 1518 return NULL;
1520 1519 }
1521 1520
1522 1521 for (i = 0; i < revcount; i++) {
1523 1522 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
1524 1523 if (n > maxrev)
1525 1524 maxrev = n;
1526 1525 }
1527 1526
1528 1527 depth = calloc(sizeof(*depth), maxrev + 1);
1529 1528 if (depth == NULL)
1530 1529 return PyErr_NoMemory();
1531 1530
1532 1531 seen = calloc(sizeof(*seen), maxrev + 1);
1533 1532 if (seen == NULL) {
1534 1533 PyErr_NoMemory();
1535 1534 goto bail;
1536 1535 }
1537 1536
1538 1537 interesting = calloc(sizeof(*interesting), 1 << revcount);
1539 1538 if (interesting == NULL) {
1540 1539 PyErr_NoMemory();
1541 1540 goto bail;
1542 1541 }
1543 1542
1544 1543 if (PyList_Sort(revs) == -1)
1545 1544 goto bail;
1546 1545
1547 1546 for (i = 0; i < revcount; i++) {
1548 1547 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
1549 1548 long b = 1l << i;
1550 1549 depth[n] = 1;
1551 1550 seen[n] = b;
1552 1551 interesting[b] = 1;
1553 1552 }
1554 1553
1555 1554 /* invariant: ninteresting is the number of non-zero entries in
1556 1555 * interesting. */
1557 1556 ninteresting = (int)revcount;
1558 1557
1559 1558 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
1560 1559 int dv = depth[v];
1561 1560 int parents[2];
1562 1561 long sv;
1563 1562
1564 1563 if (dv == 0)
1565 1564 continue;
1566 1565
1567 1566 sv = seen[v];
1568 1567 if (index_get_parents(self, v, parents, maxrev) < 0)
1569 1568 goto bail;
1570 1569
1571 1570 for (i = 0; i < 2; i++) {
1572 1571 int p = parents[i];
1573 1572 long sp;
1574 1573 int dp;
1575 1574
1576 1575 if (p == -1)
1577 1576 continue;
1578 1577
1579 1578 dp = depth[p];
1580 1579 sp = seen[p];
1581 1580 if (dp <= dv) {
1582 1581 depth[p] = dv + 1;
1583 1582 if (sp != sv) {
1584 1583 interesting[sv] += 1;
1585 1584 seen[p] = sv;
1586 1585 if (sp) {
1587 1586 interesting[sp] -= 1;
1588 1587 if (interesting[sp] == 0)
1589 1588 ninteresting -= 1;
1590 1589 }
1591 1590 }
1592 1591 }
1593 1592 else if (dv == dp - 1) {
1594 1593 long nsp = sp | sv;
1595 1594 if (nsp == sp)
1596 1595 continue;
1597 1596 seen[p] = nsp;
1598 1597 interesting[sp] -= 1;
1599 1598 if (interesting[sp] == 0)
1600 1599 ninteresting -= 1;
1601 1600 if (interesting[nsp] == 0)
1602 1601 ninteresting += 1;
1603 1602 interesting[nsp] += 1;
1604 1603 }
1605 1604 }
1606 1605 interesting[sv] -= 1;
1607 1606 if (interesting[sv] == 0)
1608 1607 ninteresting -= 1;
1609 1608 }
1610 1609
1611 1610 final = 0;
1612 1611 j = ninteresting;
1613 1612 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
1614 1613 if (interesting[i] == 0)
1615 1614 continue;
1616 1615 final |= i;
1617 1616 j -= 1;
1618 1617 }
1619 1618 if (final == 0) {
1620 1619 keys = PyList_New(0);
1621 1620 goto bail;
1622 1621 }
1623 1622
1624 1623 dict = PyDict_New();
1625 1624 if (dict == NULL)
1626 1625 goto bail;
1627 1626
1628 1627 for (i = 0; i < revcount; i++) {
1629 1628 PyObject *key;
1630 1629
1631 1630 if ((final & (1 << i)) == 0)
1632 1631 continue;
1633 1632
1634 1633 key = PyList_GET_ITEM(revs, i);
1635 1634 Py_INCREF(key);
1636 1635 Py_INCREF(Py_None);
1637 1636 if (PyDict_SetItem(dict, key, Py_None) == -1) {
1638 1637 Py_DECREF(key);
1639 1638 Py_DECREF(Py_None);
1640 1639 goto bail;
1641 1640 }
1642 1641 }
1643 1642
1644 1643 keys = PyDict_Keys(dict);
1645 1644
1646 1645 bail:
1647 1646 free(depth);
1648 1647 free(seen);
1649 1648 free(interesting);
1650 1649 Py_XDECREF(dict);
1651 1650
1652 1651 return keys;
1653 1652 }
1654 1653
1655 1654 /*
1656 1655 * Given a (possibly overlapping) set of revs, return all the
1657 1656 * common ancestor heads: heads(::args[0] and ::args[1] and ...)
1658 1657 */
1659 1658 static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
1660 1659 {
1661 1660 PyObject *ret = NULL;
1662 1661 Py_ssize_t argcount, i, len;
1663 1662 bitmask repeat = 0;
1664 1663 int revcount = 0;
1665 1664 int *revs;
1666 1665
1667 1666 argcount = PySequence_Length(args);
1668 1667 revs = PyMem_Malloc(argcount * sizeof(*revs));
1669 1668 if (argcount > 0 && revs == NULL)
1670 1669 return PyErr_NoMemory();
1671 1670 len = index_length(self);
1672 1671
1673 1672 for (i = 0; i < argcount; i++) {
1674 1673 static const int capacity = 24;
1675 1674 PyObject *obj = PySequence_GetItem(args, i);
1676 1675 bitmask x;
1677 1676 long val;
1678 1677
1679 1678 if (!PyInt_Check(obj)) {
1680 1679 PyErr_SetString(PyExc_TypeError,
1681 1680 "arguments must all be ints");
1682 1681 Py_DECREF(obj);
1683 1682 goto bail;
1684 1683 }
1685 1684 val = PyInt_AsLong(obj);
1686 1685 Py_DECREF(obj);
1687 1686 if (val == -1) {
1688 1687 ret = PyList_New(0);
1689 1688 goto done;
1690 1689 }
1691 1690 if (val < 0 || val >= len) {
1692 1691 PyErr_SetString(PyExc_IndexError,
1693 1692 "index out of range");
1694 1693 goto bail;
1695 1694 }
1696 1695 /* this cheesy bloom filter lets us avoid some more
1697 1696 * expensive duplicate checks in the common set-is-disjoint
1698 1697 * case */
1699 1698 x = 1ull << (val & 0x3f);
1700 1699 if (repeat & x) {
1701 1700 int k;
1702 1701 for (k = 0; k < revcount; k++) {
1703 1702 if (val == revs[k])
1704 1703 goto duplicate;
1705 1704 }
1706 1705 }
1707 1706 else repeat |= x;
1708 1707 if (revcount >= capacity) {
1709 1708 PyErr_Format(PyExc_OverflowError,
1710 1709 "bitset size (%d) > capacity (%d)",
1711 1710 revcount, capacity);
1712 1711 goto bail;
1713 1712 }
1714 1713 revs[revcount++] = (int)val;
1715 1714 duplicate:;
1716 1715 }
1717 1716
1718 1717 if (revcount == 0) {
1719 1718 ret = PyList_New(0);
1720 1719 goto done;
1721 1720 }
1722 1721 if (revcount == 1) {
1723 1722 PyObject *obj;
1724 1723 ret = PyList_New(1);
1725 1724 if (ret == NULL)
1726 1725 goto bail;
1727 1726 obj = PyInt_FromLong(revs[0]);
1728 1727 if (obj == NULL)
1729 1728 goto bail;
1730 1729 PyList_SET_ITEM(ret, 0, obj);
1731 1730 goto done;
1732 1731 }
1733 1732
1734 1733 ret = find_gca_candidates(self, revs, revcount);
1735 1734 if (ret == NULL)
1736 1735 goto bail;
1737 1736
1738 1737 done:
1739 1738 PyMem_Free(revs);
1740 1739 return ret;
1741 1740
1742 1741 bail:
1743 1742 PyMem_Free(revs);
1744 1743 Py_XDECREF(ret);
1745 1744 return NULL;
1746 1745 }
1747 1746
1748 1747 /*
1749 1748 * Given a (possibly overlapping) set of revs, return the greatest
1750 1749 * common ancestors: those with the longest path to the root.
1751 1750 */
1752 1751 static PyObject *index_ancestors(indexObject *self, PyObject *args)
1753 1752 {
1754 1753 PyObject *ret;
1755 1754 PyObject *gca = index_commonancestorsheads(self, args);
1756 1755 if (gca == NULL)
1757 1756 return NULL;
1758 1757
1759 1758 if (PyList_GET_SIZE(gca) <= 1) {
1760 1759 return gca;
1761 1760 }
1762 1761
1763 1762 ret = find_deepest(self, gca);
1764 1763 Py_DECREF(gca);
1765 1764 return ret;
1766 1765 }
1767 1766
1768 1767 /*
1769 1768 * Invalidate any trie entries introduced by added revs.
1770 1769 */
1771 1770 static void index_invalidate_added(indexObject *self, Py_ssize_t start)
1772 1771 {
1773 1772 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
1774 1773
1775 1774 for (i = start; i < len; i++) {
1776 1775 PyObject *tuple = PyList_GET_ITEM(self->added, i);
1777 1776 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
1778 1777
1779 1778 nt_delete_node(self->nt, PyBytes_AS_STRING(node));
1780 1779 }
1781 1780
1782 1781 if (start == 0)
1783 1782 Py_CLEAR(self->added);
1784 1783 }
1785 1784
1786 1785 /*
1787 1786 * Delete a numeric range of revs, which must extend to the end of the
1788 1787 * index, but exclude the sentinel nullid entry.
1789 1788 */
1790 1789 static int index_slice_del(indexObject *self, PyObject *item)
1791 1790 {
1792 1791 Py_ssize_t start, stop, step, slicelength;
1793 1792 Py_ssize_t length = index_length(self) + 1;
1794 1793 int ret = 0;
1795 1794
1796 1795 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
1797 1796 #ifdef IS_PY3K
1798 1797 if (PySlice_GetIndicesEx(item, length,
1799 1798 #else
1800 1799 if (PySlice_GetIndicesEx((PySliceObject*)item, length,
1801 1800 #endif
1802 1801 &start, &stop, &step, &slicelength) < 0)
1803 1802 return -1;
1804 1803
1805 1804 if (slicelength <= 0)
1806 1805 return 0;
1807 1806
1808 1807 if ((step < 0 && start < stop) || (step > 0 && start > stop))
1809 1808 stop = start;
1810 1809
1811 1810 if (step < 0) {
1812 1811 stop = start + 1;
1813 1812 start = stop + step*(slicelength - 1) - 1;
1814 1813 step = -step;
1815 1814 }
1816 1815
1817 1816 if (step != 1) {
1818 1817 PyErr_SetString(PyExc_ValueError,
1819 1818 "revlog index delete requires step size of 1");
1820 1819 return -1;
1821 1820 }
1822 1821
1823 1822 if (stop != length - 1) {
1824 1823 PyErr_SetString(PyExc_IndexError,
1825 1824 "revlog index deletion indices are invalid");
1826 1825 return -1;
1827 1826 }
1828 1827
1829 1828 if (start < self->length) {
1830 1829 if (self->nt) {
1831 1830 Py_ssize_t i;
1832 1831
1833 1832 for (i = start + 1; i < self->length; i++) {
1834 1833 const char *node = index_node_existing(self, i);
1835 1834 if (node == NULL)
1836 1835 return -1;
1837 1836
1838 1837 nt_delete_node(self->nt, node);
1839 1838 }
1840 1839 if (self->added)
1841 1840 index_invalidate_added(self, 0);
1842 1841 if (self->ntrev > start)
1843 1842 self->ntrev = (int)start;
1844 1843 }
1845 1844 self->length = start;
1846 1845 if (start < self->raw_length) {
1847 1846 if (self->cache) {
1848 1847 Py_ssize_t i;
1849 1848 for (i = start; i < self->raw_length; i++)
1850 1849 Py_CLEAR(self->cache[i]);
1851 1850 }
1852 1851 self->raw_length = start;
1853 1852 }
1854 1853 goto done;
1855 1854 }
1856 1855
1857 1856 if (self->nt) {
1858 1857 index_invalidate_added(self, start - self->length);
1859 1858 if (self->ntrev > start)
1860 1859 self->ntrev = (int)start;
1861 1860 }
1862 1861 if (self->added)
1863 1862 ret = PyList_SetSlice(self->added, start - self->length,
1864 1863 PyList_GET_SIZE(self->added), NULL);
1865 1864 done:
1866 1865 Py_CLEAR(self->headrevs);
1867 1866 return ret;
1868 1867 }
1869 1868
1870 1869 /*
1871 1870 * Supported ops:
1872 1871 *
1873 1872 * slice deletion
1874 1873 * string assignment (extend node->rev mapping)
1875 1874 * string deletion (shrink node->rev mapping)
1876 1875 */
1877 1876 static int index_assign_subscript(indexObject *self, PyObject *item,
1878 1877 PyObject *value)
1879 1878 {
1880 1879 char *node;
1881 1880 long rev;
1882 1881
1883 1882 if (PySlice_Check(item) && value == NULL)
1884 1883 return index_slice_del(self, item);
1885 1884
1886 1885 if (node_check(item, &node) == -1)
1887 1886 return -1;
1888 1887
1889 1888 if (value == NULL)
1890 1889 return self->nt ? nt_delete_node(self->nt, node) : 0;
1891 1890 rev = PyInt_AsLong(value);
1892 1891 if (rev > INT_MAX || rev < 0) {
1893 1892 if (!PyErr_Occurred())
1894 1893 PyErr_SetString(PyExc_ValueError, "rev out of range");
1895 1894 return -1;
1896 1895 }
1897 1896
1898 1897 if (index_init_nt(self) == -1)
1899 1898 return -1;
1900 1899 return nt_insert(self->nt, node, (int)rev);
1901 1900 }
1902 1901
1903 1902 /*
1904 1903 * Find all RevlogNG entries in an index that has inline data. Update
1905 1904 * the optional "offsets" table with those entries.
1906 1905 */
1907 1906 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
1908 1907 {
1909 1908 const char *data = (const char *)self->buf.buf;
1910 1909 Py_ssize_t pos = 0;
1911 1910 Py_ssize_t end = self->buf.len;
1912 1911 long incr = v1_hdrsize;
1913 1912 Py_ssize_t len = 0;
1914 1913
1915 1914 while (pos + v1_hdrsize <= end && pos >= 0) {
1916 1915 uint32_t comp_len;
1917 1916 /* 3rd element of header is length of compressed inline data */
1918 1917 comp_len = getbe32(data + pos + 8);
1919 1918 incr = v1_hdrsize + comp_len;
1920 1919 if (offsets)
1921 1920 offsets[len] = data + pos;
1922 1921 len++;
1923 1922 pos += incr;
1924 1923 }
1925 1924
1926 1925 if (pos != end) {
1927 1926 if (!PyErr_Occurred())
1928 1927 PyErr_SetString(PyExc_ValueError, "corrupt index file");
1929 1928 return -1;
1930 1929 }
1931 1930
1932 1931 return len;
1933 1932 }
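/*
 * The layout assumed by inline_scan(), restated as a sketch: every entry
 * is a fixed v1_hdrsize-byte header immediately followed by its
 * compressed inline data, whose length is the big-endian 32-bit field at
 * byte offset 8 of that header.  The scan therefore advances by
 *
 *     incr = v1_hdrsize + getbe32(data + pos + 8)
 *
 * per entry, and anything other than landing exactly on the end of the
 * buffer is treated as corruption.
 */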
1934 1933
1935 1934 static int index_init(indexObject *self, PyObject *args)
1936 1935 {
1937 1936 PyObject *data_obj, *inlined_obj;
1938 1937 Py_ssize_t size;
1939 1938
1940 1939 /* Initialize before argument-checking to avoid index_dealloc() crash. */
1941 1940 self->raw_length = 0;
1942 1941 self->added = NULL;
1943 1942 self->cache = NULL;
1944 1943 self->data = NULL;
1945 1944 memset(&self->buf, 0, sizeof(self->buf));
1946 1945 self->headrevs = NULL;
1947 1946 self->filteredrevs = Py_None;
1948 1947 Py_INCREF(Py_None);
1949 1948 self->nt = NULL;
1950 1949 self->offsets = NULL;
1951 1950
1952 1951 if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
1953 1952 return -1;
1954 1953 if (!PyObject_CheckBuffer(data_obj)) {
1955 1954 PyErr_SetString(PyExc_TypeError,
1956 1955 "data does not support buffer interface");
1957 1956 return -1;
1958 1957 }
1959 1958
1960 1959 if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
1961 1960 return -1;
1962 1961 size = self->buf.len;
1963 1962
1964 1963 self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
1965 1964 self->data = data_obj;
1966 1965
1967 1966 self->ntlookups = self->ntmisses = 0;
1968 1967 self->ntrev = -1;
1969 1968 Py_INCREF(self->data);
1970 1969
1971 1970 if (self->inlined) {
1972 1971 Py_ssize_t len = inline_scan(self, NULL);
1973 1972 if (len == -1)
1974 1973 goto bail;
1975 1974 self->raw_length = len;
1976 1975 self->length = len;
1977 1976 } else {
1978 1977 if (size % v1_hdrsize) {
1979 1978 PyErr_SetString(PyExc_ValueError, "corrupt index file");
1980 1979 goto bail;
1981 1980 }
1982 1981 self->raw_length = size / v1_hdrsize;
1983 1982 self->length = self->raw_length;
1984 1983 }
1985 1984
1986 1985 return 0;
1987 1986 bail:
1988 1987 return -1;
1989 1988 }
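/*
 * A minimal construction sketch, assuming the type is reached through
 * the module attribute registered in revlog_module_init() below:
 *
 *     idx = parsers.index(index_data, inlined)
 *
 * index_data must expose the buffer interface; for inline indexes the
 * entry count comes from inline_scan(), otherwise the buffer length must
 * be an exact multiple of v1_hdrsize.
 */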
1990 1989
1991 1990 static PyObject *index_nodemap(indexObject *self)
1992 1991 {
1993 1992 Py_INCREF(self);
1994 1993 return (PyObject *)self;
1995 1994 }
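/*
 * Returning the index object itself here is what lets the Python side
 * treat "index.nodemap" as a mapping: node-keyed subscripting and
 * containment are served by the subscript and sq_contains slots
 * registered further down, e.g. (hypothetically) index.nodemap[node] or
 * "node in index.nodemap".
 */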
1996 1995
1997 1996 static void _index_clearcaches(indexObject *self)
1998 1997 {
1999 1998 if (self->cache) {
2000 1999 Py_ssize_t i;
2001 2000
2002 2001 for (i = 0; i < self->raw_length; i++)
2003 2002 Py_CLEAR(self->cache[i]);
2004 2003 free(self->cache);
2005 2004 self->cache = NULL;
2006 2005 }
2007 2006 if (self->offsets) {
2008 2007 PyMem_Free(self->offsets);
2009 2008 self->offsets = NULL;
2010 2009 }
2011 2010 if (self->nt != NULL) {
2012 2011 free(self->nt->nodes);
2013 2012 PyMem_Free(self->nt);
2014 2013 }
2015 2014 self->nt = NULL;
2016 2015 Py_CLEAR(self->headrevs);
2017 2016 }
2018 2017
2019 2018 static PyObject *index_clearcaches(indexObject *self)
2020 2019 {
2021 2020 _index_clearcaches(self);
2022 2021 self->ntrev = -1;
2023 2022 self->ntlookups = self->ntmisses = 0;
2024 2023 Py_RETURN_NONE;
2025 2024 }
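/*
 * In short, clearcaches() drops everything that is rebuilt lazily: the
 * decoded entry tuples, the inline offsets table, the node->rev trie and
 * the cached headrevs, and it resets the trie scan position and lookup
 * statistics.
 */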
2026 2025
2027 2026 static void index_dealloc(indexObject *self)
2028 2027 {
2029 2028 _index_clearcaches(self);
2030 2029 Py_XDECREF(self->filteredrevs);
2031 2030 if (self->buf.buf) {
2032 2031 PyBuffer_Release(&self->buf);
2033 2032 memset(&self->buf, 0, sizeof(self->buf));
2034 2033 }
2035 2034 Py_XDECREF(self->data);
2036 2035 Py_XDECREF(self->added);
2037 2036 PyObject_Del(self);
2038 2037 }
2039 2038
2040 2039 static PySequenceMethods index_sequence_methods = {
2041 2040 (lenfunc)index_length, /* sq_length */
2042 2041 0, /* sq_concat */
2043 2042 0, /* sq_repeat */
2044 2043 (ssizeargfunc)index_get, /* sq_item */
2045 2044 0, /* sq_slice */
2046 2045 0, /* sq_ass_item */
2047 2046 0, /* sq_ass_slice */
2048 2047 (objobjproc)index_contains, /* sq_contains */
2049 2048 };
2050 2049
2051 2050 static PyMappingMethods index_mapping_methods = {
2052 2051 (lenfunc)index_length, /* mp_length */
2053 2052 (binaryfunc)index_getitem, /* mp_subscript */
2054 2053 (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
2055 2054 };
2056 2055
2057 2056 static PyMethodDef index_methods[] = {
2058 2057 {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
2059 2058 "return the gca set of the given revs"},
2060 2059 {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
2061 2060 METH_VARARGS,
2062 2061 "return the heads of the common ancestors of the given revs"},
2063 2062 {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
2064 2063 "clear the index caches"},
2065 2064 {"get", (PyCFunction)index_m_get, METH_VARARGS,
2066 2065 "get an index entry"},
2067 2066 {"computephasesmapsets", (PyCFunction)compute_phases_map_sets,
2068 2067 METH_VARARGS, "compute phases"},
2069 2068 {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
2070 2069 "reachableroots"},
2071 2070 {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
2072 2071 "get head revisions"}, /* Can do filtering since 3.2 */
2073 2072 {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
2074 2073 "get filtered head revisions"}, /* Can always do filtering */
2075 2074 {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
2076 2075 "determine revisions with deltas to reconstruct fulltext"},
2077 2076 {"append", (PyCFunction)index_append, METH_O,
2078 2077 "append an index entry"},
2079 2078 {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
2080 2079 "match a potentially ambiguous node ID"},
2081 2080 {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
2082 2081 "find length of shortest hex nodeid of a binary ID"},
2083 2082 {"stats", (PyCFunction)index_stats, METH_NOARGS,
2084 2083 "stats for the index"},
2085 2084 {NULL} /* Sentinel */
2086 2085 };
2087 2086
2088 2087 static PyGetSetDef index_getset[] = {
2089 2088 {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
2090 2089 {NULL} /* Sentinel */
2091 2090 };
2092 2091
2093 2092 static PyTypeObject indexType = {
2094 2093 PyVarObject_HEAD_INIT(NULL, 0) /* header */
2095 2094 "parsers.index", /* tp_name */
2096 2095 sizeof(indexObject), /* tp_basicsize */
2097 2096 0, /* tp_itemsize */
2098 2097 (destructor)index_dealloc, /* tp_dealloc */
2099 2098 0, /* tp_print */
2100 2099 0, /* tp_getattr */
2101 2100 0, /* tp_setattr */
2102 2101 0, /* tp_compare */
2103 2102 0, /* tp_repr */
2104 2103 0, /* tp_as_number */
2105 2104 &index_sequence_methods, /* tp_as_sequence */
2106 2105 &index_mapping_methods, /* tp_as_mapping */
2107 2106 0, /* tp_hash */
2108 2107 0, /* tp_call */
2109 2108 0, /* tp_str */
2110 2109 0, /* tp_getattro */
2111 2110 0, /* tp_setattro */
2112 2111 0, /* tp_as_buffer */
2113 2112 Py_TPFLAGS_DEFAULT, /* tp_flags */
2114 2113 "revlog index", /* tp_doc */
2115 2114 0, /* tp_traverse */
2116 2115 0, /* tp_clear */
2117 2116 0, /* tp_richcompare */
2118 2117 0, /* tp_weaklistoffset */
2119 2118 0, /* tp_iter */
2120 2119 0, /* tp_iternext */
2121 2120 index_methods, /* tp_methods */
2122 2121 0, /* tp_members */
2123 2122 index_getset, /* tp_getset */
2124 2123 0, /* tp_base */
2125 2124 0, /* tp_dict */
2126 2125 0, /* tp_descr_get */
2127 2126 0, /* tp_descr_set */
2128 2127 0, /* tp_dictoffset */
2129 2128 (initproc)index_init, /* tp_init */
2130 2129 0, /* tp_alloc */
2131 2130 };
2132 2131
2133 2132 /*
2134 2133  * returns a tuple of the form (index, cache) with elements as
2135 2134 * follows:
2136 2135 *
2137 2136 * index: an index object that lazily parses RevlogNG records
2138 2137 * cache: if data is inlined, a tuple (0, index_file_content), else None
2139 2138 * index_file_content could be a string, or a buffer
2140 2139 *
2141 2140 * added complications are for backwards compatibility
2142 2141 */
2143 2142 PyObject *parse_index2(PyObject *self, PyObject *args)
2144 2143 {
2145 2144 PyObject *tuple = NULL, *cache = NULL;
2146 2145 indexObject *idx;
2147 2146 int ret;
2148 2147
2149 2148 idx = PyObject_New(indexObject, &indexType);
2150 2149 if (idx == NULL)
2151 2150 goto bail;
2152 2151
2153 2152 ret = index_init(idx, args);
2154 2153 if (ret == -1)
2155 2154 goto bail;
2156 2155
2157 2156 if (idx->inlined) {
2158 2157 cache = Py_BuildValue("iO", 0, idx->data);
2159 2158 if (cache == NULL)
2160 2159 goto bail;
2161 2160 } else {
2162 2161 cache = Py_None;
2163 2162 Py_INCREF(cache);
2164 2163 }
2165 2164
2166 2165 tuple = Py_BuildValue("NN", idx, cache);
2167 2166 if (!tuple)
2168 2167 goto bail;
2169 2168 return tuple;
2170 2169
2171 2170 bail:
2172 2171 Py_XDECREF(idx);
2173 2172 Py_XDECREF(cache);
2174 2173 Py_XDECREF(tuple);
2175 2174 return NULL;
2176 2175 }
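/*
 * A hypothetical caller-side sketch (the Python wiring lives outside
 * this file):
 *
 *     index, cache = parse_index2(index_data, inlined)
 *
 * where cache is (0, index_data) for inline revlogs and None otherwise.
 */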
2177 2176
2178 2177 void revlog_module_init(PyObject *mod)
2179 2178 {
2180 2179 indexType.tp_new = PyType_GenericNew;
2181 2180 if (PyType_Ready(&indexType) < 0)
2182 2181 return;
2183 2182 Py_INCREF(&indexType);
2184 2183 PyModule_AddObject(mod, "index", (PyObject *)&indexType);
2185 2184
2186 2185 nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0,
2187 2186 -1, -1, -1, -1, nullid, 20);
2188 2187 if (nullentry)
2189 2188 PyObject_GC_UnTrack(nullentry);
2190 2189 }