/*
 * cext: match format string for 32bit long platforms
 * Joerg Sonnenberger - r46697:c581b9ee (default branch)
 */
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #define PY_SSIZE_T_CLEAN
11 11 #include <Python.h>
12 12 #include <assert.h>
13 13 #include <ctype.h>
14 14 #include <limits.h>
15 15 #include <stddef.h>
16 16 #include <stdlib.h>
17 17 #include <string.h>
18 18
19 19 #include "bitmanipulation.h"
20 20 #include "charencode.h"
21 21 #include "revlog.h"
22 22 #include "util.h"
23 23
24 24 #ifdef IS_PY3K
25 25 /* The mapping of Python types is meant to be temporary to get Python
26 26 * 3 to compile. We should remove this once Python 3 support is fully
27 27 * supported and proper types are used in the extensions themselves. */
28 28 #define PyInt_Check PyLong_Check
29 29 #define PyInt_FromLong PyLong_FromLong
30 30 #define PyInt_FromSsize_t PyLong_FromSsize_t
31 31 #define PyInt_AsLong PyLong_AsLong
32 32 #endif
33 33
34 34 typedef struct indexObjectStruct indexObject;
35 35
36 36 typedef struct {
37 37 int children[16];
38 38 } nodetreenode;
39 39
40 40 typedef struct {
41 41 int abi_version;
42 42 Py_ssize_t (*index_length)(const indexObject *);
43 43 const char *(*index_node)(indexObject *, Py_ssize_t);
44 44 int (*index_parents)(PyObject *, int, int *);
45 45 } Revlog_CAPI;
46 46
47 47 /*
48 48 * A base-16 trie for fast node->rev mapping.
49 49 *
50 50 * Positive value is index of the next node in the trie
51 51 * Negative value is a leaf: -(rev + 2)
52 52 * Zero is empty
53 53 */
54 54 typedef struct {
55 55 indexObject *index;
56 56 nodetreenode *nodes;
57 57 unsigned length; /* # nodes in use */
58 58 unsigned capacity; /* # nodes allocated */
59 59 int depth; /* maximum depth of tree */
60 60 int splits; /* # splits performed */
61 61 } nodetree;
62 62
63 63 typedef struct {
64 64 PyObject_HEAD /* ; */
65 65 nodetree nt;
66 66 } nodetreeObject;
67 67
/*
 * This class has two behaviors.
 *
 * When used in a list-like way (with integer keys), we decode an
 * entry in a RevlogNG index file on demand. We have limited support for
 * integer-keyed insert and delete, only at elements right before the
 * end.
 *
 * With string keys, we lazily perform a reverse mapping from node to
 * rev, using a base-16 trie.
 */
struct indexObjectStruct {
	PyObject_HEAD
	/* Type-specific fields go here. */
	PyObject *data;       /* raw bytes of index */
	Py_buffer buf;        /* buffer of data */
	const char **offsets; /* populated on demand */
	Py_ssize_t length;    /* current on-disk number of elements */
	unsigned new_length;  /* number of added elements */
	unsigned added_length; /* space reserved for added elements */
	char *added;          /* populated on demand */
	PyObject *headrevs;   /* cache, invalidated on changes */
	PyObject *filteredrevs; /* filtered revs set */
	nodetree nt;          /* base-16 trie */
	int ntinitialized;    /* 0 or 1 */
	int ntrev;            /* last rev scanned */
	int ntlookups;        /* # lookups */
	int ntmisses;         /* # lookups that miss the cache */
	int inlined;          /* nonzero for inline indexes; changes
	                       * record addressing (see index_deref) */
};
98 98
99 99 static Py_ssize_t index_length(const indexObject *self)
100 100 {
101 101 return self->length + self->new_length;
102 102 }
103 103
104 104 static PyObject *nullentry = NULL;
105 105 static const char nullid[20] = {0};
106 106 static const Py_ssize_t nullrev = -1;
107 107
108 108 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
109 109
110 110 static int index_find_node(indexObject *self, const char *node,
111 111 Py_ssize_t nodelen);
112 112
113 113 #if LONG_MAX == 0x7fffffffL
114 114 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
115 115 #else
116 116 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
117 117 #endif
118 118
119 119 /* A RevlogNG v1 index entry is 64 bytes long. */
120 120 static const long v1_hdrsize = 64;
121 121
/*
 * Set mercurial.error.RevlogError as the current Python exception.
 *
 * The class is looked up through the module machinery on every call;
 * any failure along the way leaves a different exception set (import
 * error or SystemError), so callers can simply return NULL after
 * invoking this helper either way.
 */
static void raise_revlog_error(void)
{
	PyObject *mod = NULL, *dict = NULL, *errclass = NULL;

	mod = PyImport_ImportModule("mercurial.error");
	if (mod == NULL) {
		goto cleanup;
	}

	dict = PyModule_GetDict(mod);
	if (dict == NULL) {
		goto cleanup;
	}
	/* PyModule_GetDict returns a borrowed reference; take our own so
	 * the shared cleanup path can unconditionally decref it. */
	Py_INCREF(dict);

	/* borrowed reference; no decref needed for errclass */
	errclass = PyDict_GetItemString(dict, "RevlogError");
	if (errclass == NULL) {
		PyErr_SetString(PyExc_SystemError,
				"could not find RevlogError");
		goto cleanup;
	}

	/* value of exception is ignored by callers */
	PyErr_SetString(errclass, "RevlogError");

cleanup:
	Py_XDECREF(dict);
	Py_XDECREF(mod);
}
151 151
/*
 * Return a pointer to the beginning of a RevlogNG record.
 *
 * Appended (in-memory) revisions resolve into the `added` buffer. For
 * inline indexes the per-revision offset table is built lazily on first
 * access; returns NULL with a Python exception set if that table cannot
 * be allocated or scanned.
 */
static const char *index_deref(indexObject *self, Py_ssize_t pos)
{
	if (pos >= self->length)
		return self->added + (pos - self->length) * v1_hdrsize;

	if (self->inlined && pos > 0) {
		if (self->offsets == NULL) {
			Py_ssize_t ret;
			self->offsets =
			    PyMem_Malloc(self->length * sizeof(*self->offsets));
			if (self->offsets == NULL)
				return (const char *)PyErr_NoMemory();
			ret = inline_scan(self, self->offsets);
			if (ret == -1) {
				return NULL;
			};
		}
		return self->offsets[pos];
	}

	/* non-inline data: records are fixed size, simple arithmetic on
	 * the underlying buffer suffices */
	return (const char *)(self->buf.buf) + pos * v1_hdrsize;
}
177 177
178 178 /*
179 179 * Get parents of the given rev.
180 180 *
181 181 * The specified rev must be valid and must not be nullrev. A returned
182 182 * parent revision may be nullrev, but is guaranteed to be in valid range.
183 183 */
184 184 static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
185 185 int maxrev)
186 186 {
187 187 const char *data = index_deref(self, rev);
188 188
189 189 ps[0] = getbe32(data + 24);
190 190 ps[1] = getbe32(data + 28);
191 191
192 192 /* If index file is corrupted, ps[] may point to invalid revisions. So
193 193 * there is a risk of buffer overflow to trust them unconditionally. */
194 194 if (ps[0] < -1 || ps[0] > maxrev || ps[1] < -1 || ps[1] > maxrev) {
195 195 PyErr_SetString(PyExc_ValueError, "parent out of range");
196 196 return -1;
197 197 }
198 198 return 0;
199 199 }
200 200
201 201 /*
202 202 * Get parents of the given rev.
203 203 *
204 204 * If the specified rev is out of range, IndexError will be raised. If the
205 205 * revlog entry is corrupted, ValueError may be raised.
206 206 *
207 207 * Returns 0 on success or -1 on failure.
208 208 */
209 209 static int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps)
210 210 {
211 211 int tiprev;
212 212 if (!op || !HgRevlogIndex_Check(op) || !ps) {
213 213 PyErr_BadInternalCall();
214 214 return -1;
215 215 }
216 216 tiprev = (int)index_length((indexObject *)op) - 1;
217 217 if (rev < -1 || rev > tiprev) {
218 218 PyErr_Format(PyExc_IndexError, "rev out of range: %d", rev);
219 219 return -1;
220 220 } else if (rev == -1) {
221 221 ps[0] = ps[1] = -1;
222 222 return 0;
223 223 } else {
224 224 return index_get_parents((indexObject *)op, rev, ps, tiprev);
225 225 }
226 226 }
227 227
/*
 * Byte offset of the start of rev's data within the revlog data file.
 *
 * The on-disk field packs a 48-bit offset with 16 flag bits in the low
 * word; the flags are shifted out before returning. For the first entry
 * the high 32 bits hold the revlog version, so they are masked off.
 *
 * NOTE(review): index_deref() can return NULL (allocation/scan failure
 * on inline indexes) and that case is not checked before dereferencing
 * — presumably callers only hit materialized records; confirm.
 */
static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
{
	const char *data;
	uint64_t offset;

	if (rev == nullrev)
		return 0;

	data = index_deref(self, rev);
	offset = getbe32(data + 4);
	if (rev == 0) {
		/* mask out version number for the first entry */
		offset &= 0xFFFF;
	} else {
		uint32_t offset_high = getbe32(data);
		offset |= ((uint64_t)offset_high) << 32;
	}
	/* drop the 16 flag bits; the remaining 48 bits are the offset */
	return (int64_t)(offset >> 16);
}
247 247
248 248 static inline int index_get_length(indexObject *self, Py_ssize_t rev)
249 249 {
250 250 const char *data;
251 251 int tmp;
252 252
253 253 if (rev == nullrev)
254 254 return 0;
255 255
256 256 data = index_deref(self, rev);
257 257
258 258 tmp = (int)getbe32(data + 8);
259 259 if (tmp < 0) {
260 260 PyErr_Format(PyExc_OverflowError,
261 261 "revlog entry size out of bound (%d)", tmp);
262 262 return -1;
263 263 }
264 264 return tmp;
265 265 }
266 266
/*
 * RevlogNG format (all in big endian, data may be inlined):
 *    6 bytes: offset
 *    2 bytes: flags
 *    4 bytes: compressed length
 *    4 bytes: uncompressed length
 *    4 bytes: base revision
 *    4 bytes: link revision
 *    4 bytes: parent 1 revision
 *    4 bytes: parent 2 revision
 *   32 bytes: nodeid (only 20 bytes used)
 *
 * Decode entry `pos` into the Python 8-tuple used throughout revlog.
 * Returns NULL with IndexError for out-of-range pos, or with whatever
 * error index_deref set.
 */
static PyObject *index_get(indexObject *self, Py_ssize_t pos)
{
	uint64_t offset_flags;
	int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
	const char *c_node_id;
	const char *data;
	Py_ssize_t length = index_length(self);

	/* nullrev maps to the precomputed shared entry */
	if (pos == nullrev) {
		Py_INCREF(nullentry);
		return nullentry;
	}

	if (pos < 0 || pos >= length) {
		PyErr_SetString(PyExc_IndexError, "revlog index out of range");
		return NULL;
	}

	data = index_deref(self, pos);
	if (data == NULL)
		return NULL;

	offset_flags = getbe32(data + 4);
	/*
	 * The first entry on-disk needs the version number masked out,
	 * but this doesn't apply if entries are added to an empty index.
	 */
	if (self->length && pos == 0)
		offset_flags &= 0xFFFF;
	else {
		uint32_t offset_high = getbe32(data);
		offset_flags |= ((uint64_t)offset_high) << 32;
	}

	comp_len = getbe32(data + 8);
	uncomp_len = getbe32(data + 12);
	base_rev = getbe32(data + 16);
	link_rev = getbe32(data + 20);
	parent_1 = getbe32(data + 24);
	parent_2 = getbe32(data + 28);
	c_node_id = data + 32;

	/* tuple_format selects "K" vs "k" per platform so the 64-bit
	 * offset+flags value round-trips (see its definition above) */
	return Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
	                     base_rev, link_rev, parent_1, parent_2, c_node_id,
	                     (Py_ssize_t)20);
}
325 325
326 326 /*
327 327 * Return the 20-byte SHA of the node corresponding to the given rev.
328 328 */
329 329 static const char *index_node(indexObject *self, Py_ssize_t pos)
330 330 {
331 331 Py_ssize_t length = index_length(self);
332 332 const char *data;
333 333
334 334 if (pos == nullrev)
335 335 return nullid;
336 336
337 337 if (pos >= length)
338 338 return NULL;
339 339
340 340 data = index_deref(self, pos);
341 341 return data ? data + 32 : NULL;
342 342 }
343 343
344 344 /*
345 345 * Return the 20-byte SHA of the node corresponding to the given rev. The
346 346 * rev is assumed to be existing. If not, an exception is set.
347 347 */
348 348 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
349 349 {
350 350 const char *node = index_node(self, pos);
351 351 if (node == NULL) {
352 352 PyErr_Format(PyExc_IndexError, "could not access rev %d",
353 353 (int)pos);
354 354 }
355 355 return node;
356 356 }
357 357
358 358 static int nt_insert(nodetree *self, const char *node, int rev);
359 359
360 360 static int node_check(PyObject *obj, char **node)
361 361 {
362 362 Py_ssize_t nodelen;
363 363 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
364 364 return -1;
365 365 if (nodelen == 20)
366 366 return 0;
367 367 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
368 368 return -1;
369 369 }
370 370
/*
 * Append one revision entry (an 8-tuple matching tuple_format) to the
 * in-memory `added` buffer, growing that buffer geometrically.
 *
 * Returns None, or NULL with an exception set on invalid input or OOM.
 */
static PyObject *index_append(indexObject *self, PyObject *obj)
{
	uint64_t offset_flags;
	int rev, comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
	Py_ssize_t c_node_id_len;
	const char *c_node_id;
	char *data;

	if (!PyArg_ParseTuple(obj, tuple_format, &offset_flags, &comp_len,
	                      &uncomp_len, &base_rev, &link_rev, &parent_1,
	                      &parent_2, &c_node_id, &c_node_id_len)) {
		PyErr_SetString(PyExc_TypeError, "8-tuple required");
		return NULL;
	}
	/* 20 bytes for SHA-1 nodes; 32 is the full on-disk field width */
	if (c_node_id_len != 20 && c_node_id_len != 32) {
		PyErr_SetString(PyExc_TypeError, "invalid node");
		return NULL;
	}

	/* grow the append buffer: double it, starting at 4096 entries */
	if (self->new_length == self->added_length) {
		size_t new_added_length =
		    self->added_length ? self->added_length * 2 : 4096;
		void *new_added =
		    PyMem_Realloc(self->added, new_added_length * v1_hdrsize);
		if (!new_added)
			return PyErr_NoMemory();
		self->added = new_added;
		self->added_length = new_added_length;
	}
	rev = self->length + self->new_length;
	data = self->added + v1_hdrsize * self->new_length++;
	/* serialize in the on-disk RevlogNG layout (big endian) */
	putbe32(offset_flags >> 32, data);
	putbe32(offset_flags & 0xffffffffU, data + 4);
	putbe32(comp_len, data + 8);
	putbe32(uncomp_len, data + 12);
	putbe32(base_rev, data + 16);
	putbe32(link_rev, data + 20);
	putbe32(parent_1, data + 24);
	putbe32(parent_2, data + 28);
	memcpy(data + 32, c_node_id, c_node_id_len);
	/* zero-pad the unused tail of the 32-byte node field */
	memset(data + 32 + c_node_id_len, 0, 32 - c_node_id_len);

	/* keep the node->rev trie in sync if it has been built */
	if (self->ntinitialized)
		nt_insert(&self->nt, c_node_id, rev);

	/* any append invalidates the cached head list */
	Py_CLEAR(self->headrevs);
	Py_RETURN_NONE;
}
419 419
/*
 * Build a dict of diagnostic counters for this index: entry counts and
 * node-trie statistics. Returns NULL with an exception set on error.
 */
static PyObject *index_stats(indexObject *self)
{
	PyObject *obj = PyDict_New();
	PyObject *s = NULL;
	PyObject *t = NULL;

	if (obj == NULL)
		return NULL;

/* store self-><__n> in obj under the human-readable key __d; on any
 * failure s/t are left set for the shared bail path to release */
#define istat(__n, __d)                                                        \
	do {                                                                   \
		s = PyBytes_FromString(__d);                                   \
		t = PyInt_FromSsize_t(self->__n);                              \
		if (!s || !t)                                                  \
			goto bail;                                             \
		if (PyDict_SetItem(obj, s, t) == -1)                           \
			goto bail;                                             \
		Py_CLEAR(s);                                                   \
		Py_CLEAR(t);                                                   \
	} while (0)

	if (self->added_length)
		istat(new_length, "index entries added");
	istat(length, "revs in memory");
	istat(ntlookups, "node trie lookups");
	istat(ntmisses, "node trie misses");
	istat(ntrev, "node trie last rev scanned");
	if (self->ntinitialized) {
		istat(nt.capacity, "node trie capacity");
		istat(nt.depth, "node trie depth");
		istat(nt.length, "node trie count");
		istat(nt.splits, "node trie splits");
	}

#undef istat

	return obj;

bail:
	Py_XDECREF(obj);
	Py_XDECREF(s);
	Py_XDECREF(t);
	return NULL;
}
464 464
465 465 /*
466 466 * When we cache a list, we want to be sure the caller can't mutate
467 467 * the cached copy.
468 468 */
469 469 static PyObject *list_copy(PyObject *list)
470 470 {
471 471 Py_ssize_t len = PyList_GET_SIZE(list);
472 472 PyObject *newlist = PyList_New(len);
473 473 Py_ssize_t i;
474 474
475 475 if (newlist == NULL)
476 476 return NULL;
477 477
478 478 for (i = 0; i < len; i++) {
479 479 PyObject *obj = PyList_GET_ITEM(list, i);
480 480 Py_INCREF(obj);
481 481 PyList_SET_ITEM(newlist, i, obj);
482 482 }
483 483
484 484 return newlist;
485 485 }
486 486
/*
 * Call `filter(arg)` and interpret the result as a boolean.
 *
 * Returns 1 if arg is filtered, 0 if not (or when filter is NULL), and
 * -1 with a Python exception set on error.
 */
static int check_filter(PyObject *filter, Py_ssize_t arg)
{
	if (filter) {
		PyObject *arglist, *result;
		int isfiltered;

		arglist = Py_BuildValue("(n)", arg);
		if (!arglist) {
			return -1;
		}

		/* NOTE(review): PyEval_CallObject is deprecated in newer
		 * CPython in favor of PyObject_CallObject — candidate
		 * cleanup when minimum versions allow. */
		result = PyEval_CallObject(filter, arglist);
		Py_DECREF(arglist);
		if (!result) {
			return -1;
		}

		/* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
		 * same as this function, so we can just return it directly.*/
		isfiltered = PyObject_IsTrue(result);
		Py_DECREF(result);
		return isfiltered;
	} else {
		return 0;
	}
}
513 513
514 514 static inline void set_phase_from_parents(char *phases, int parent_1,
515 515 int parent_2, Py_ssize_t i)
516 516 {
517 517 if (parent_1 >= 0 && phases[parent_1] > phases[i])
518 518 phases[i] = phases[parent_1];
519 519 if (parent_2 >= 0 && phases[parent_2] > phases[i])
520 520 phases[i] = phases[parent_2];
521 521 }
522 522
/*
 * reachableroots2(minroot, heads, roots, includepath) -> list of revs
 *
 * Walk ancestors of `heads` (bounded below by `minroot`) and collect
 * the members of `roots` that are reachable. When includepath is True,
 * also include every seen rev on a path between a reachable root and a
 * head.
 */
static PyObject *reachableroots2(indexObject *self, PyObject *args)
{

	/* Input */
	long minroot;
	PyObject *includepatharg = NULL;
	int includepath = 0;
	/* heads and roots are lists */
	PyObject *heads = NULL;
	PyObject *roots = NULL;
	PyObject *reachable = NULL;

	PyObject *val;
	Py_ssize_t len = index_length(self);
	long revnum;
	Py_ssize_t k;
	Py_ssize_t i;
	Py_ssize_t l;
	int r;
	int parents[2];

	/* Internal data structure:
	 * tovisit: array of length len+1 (all revs + nullrev), filled upto
	 * lentovisit
	 *
	 * revstates: array of length len+1 (all revs + nullrev) */
	int *tovisit = NULL;
	long lentovisit = 0;
	enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
	char *revstates = NULL;

	/* Get arguments */
	if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
	                      &PyList_Type, &roots, &PyBool_Type,
	                      &includepatharg))
		goto bail;

	if (includepatharg == Py_True)
		includepath = 1;

	/* Initialize return set */
	reachable = PyList_New(0);
	if (reachable == NULL)
		goto bail;

	/* Initialize internal datastructures */
	tovisit = (int *)malloc((len + 1) * sizeof(int));
	if (tovisit == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	revstates = (char *)calloc(len + 1, 1);
	if (revstates == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* revstates is indexed by rev+1, so nullrev (-1) lands in slot 0 */
	l = PyList_GET_SIZE(roots);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		/* If root is out of range, e.g. wdir(), it must be unreachable
		 * from heads. So we can just ignore it. */
		if (revnum + 1 < 0 || revnum + 1 >= len + 1)
			continue;
		revstates[revnum + 1] |= RS_ROOT;
	}

	/* Populate tovisit with all the heads */
	l = PyList_GET_SIZE(heads);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
			PyErr_SetString(PyExc_IndexError, "head out of range");
			goto bail;
		}
		if (!(revstates[revnum + 1] & RS_SEEN)) {
			tovisit[lentovisit++] = (int)revnum;
			revstates[revnum + 1] |= RS_SEEN;
		}
	}

	/* Visit the tovisit list and find the reachable roots */
	k = 0;
	while (k < lentovisit) {
		/* Add the node to reachable if it is a root*/
		revnum = tovisit[k++];
		if (revstates[revnum + 1] & RS_ROOT) {
			revstates[revnum + 1] |= RS_REACHABLE;
			val = PyInt_FromLong(revnum);
			if (val == NULL)
				goto bail;
			r = PyList_Append(reachable, val);
			Py_DECREF(val);
			if (r < 0)
				goto bail;
			/* without includepath the walk can stop at roots */
			if (includepath == 0)
				continue;
		}

		/* Add its parents to the list of nodes to visit */
		if (revnum == nullrev)
			continue;
		r = index_get_parents(self, revnum, parents, (int)len - 1);
		if (r < 0)
			goto bail;
		for (i = 0; i < 2; i++) {
			if (!(revstates[parents[i] + 1] & RS_SEEN) &&
			    parents[i] >= minroot) {
				tovisit[lentovisit++] = parents[i];
				revstates[parents[i] + 1] |= RS_SEEN;
			}
		}
	}

	/* Find all the nodes in between the roots we found and the heads
	 * and add them to the reachable set */
	if (includepath == 1) {
		long minidx = minroot;
		if (minidx < 0)
			minidx = 0;
		/* forward scan: a seen rev is on a path iff one of its
		 * parents is already marked reachable */
		for (i = minidx; i < len; i++) {
			if (!(revstates[i + 1] & RS_SEEN))
				continue;
			r = index_get_parents(self, i, parents, (int)len - 1);
			/* Corrupted index file, error is set from
			 * index_get_parents */
			if (r < 0)
				goto bail;
			if (((revstates[parents[0] + 1] |
			      revstates[parents[1] + 1]) &
			     RS_REACHABLE) &&
			    !(revstates[i + 1] & RS_REACHABLE)) {
				revstates[i + 1] |= RS_REACHABLE;
				val = PyInt_FromSsize_t(i);
				if (val == NULL)
					goto bail;
				r = PyList_Append(reachable, val);
				Py_DECREF(val);
				if (r < 0)
					goto bail;
			}
		}
	}

	free(revstates);
	free(tovisit);
	return reachable;
bail:
	Py_XDECREF(reachable);
	free(revstates);
	free(tovisit);
	return NULL;
}
681 681
/*
 * Mark every rev whose node appears in the set `roots` with `phase` and
 * return the smallest such rev (-1 if none were marked).
 *
 * Returns -2 with a Python exception set on error (wrong input type,
 * bad node, or unknown/out-of-range rev).
 */
static int add_roots_get_min(indexObject *self, PyObject *roots, char *phases,
                             char phase)
{
	Py_ssize_t len = index_length(self);
	PyObject *item;
	PyObject *iterator;
	int rev, minrev = -1;
	char *node;

	if (!PySet_Check(roots)) {
		PyErr_SetString(PyExc_TypeError,
		                "roots must be a set of nodes");
		return -2;
	}
	iterator = PyObject_GetIter(roots);
	if (iterator == NULL)
		return -2;
	while ((item = PyIter_Next(iterator))) {
		if (node_check(item, &node) == -1)
			goto failed;
		rev = index_find_node(self, node, 20);
		/* null is implicitly public, so negative is invalid */
		if (rev < 0 || rev >= len)
			goto failed;
		phases[rev] = phase;
		if (minrev == -1 || minrev > rev)
			minrev = rev;
		Py_DECREF(item);
	}
	/* NOTE(review): if PyIter_Next fails (rather than just finishing),
	 * minrev is returned with an exception pending — confirm callers
	 * tolerate that before relying on it. */
	Py_DECREF(iterator);
	return minrev;
failed:
	Py_DECREF(iterator);
	Py_DECREF(item);
	return -2;
}
718 718
719 719 static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
720 720 {
721 721 /* 0: public (untracked), 1: draft, 2: secret, 32: archive,
722 722 96: internal */
723 723 static const char trackedphases[] = {1, 2, 32, 96};
724 724 PyObject *roots = Py_None;
725 725 PyObject *phasesetsdict = NULL;
726 726 PyObject *phasesets[4] = {NULL, NULL, NULL, NULL};
727 727 Py_ssize_t len = index_length(self);
728 728 char *phases = NULL;
729 729 int minphaserev = -1, rev, i;
730 730 const int numphases = (int)(sizeof(phasesets) / sizeof(phasesets[0]));
731 731
732 732 if (!PyArg_ParseTuple(args, "O", &roots))
733 733 return NULL;
734 734 if (roots == NULL || !PyDict_Check(roots)) {
735 735 PyErr_SetString(PyExc_TypeError, "roots must be a dictionary");
736 736 return NULL;
737 737 }
738 738
739 739 phases = calloc(len, 1);
740 740 if (phases == NULL) {
741 741 PyErr_NoMemory();
742 742 return NULL;
743 743 }
744 744
745 745 for (i = 0; i < numphases; ++i) {
746 746 PyObject *pyphase = PyInt_FromLong(trackedphases[i]);
747 747 PyObject *phaseroots = NULL;
748 748 if (pyphase == NULL)
749 749 goto release;
750 750 phaseroots = PyDict_GetItem(roots, pyphase);
751 751 Py_DECREF(pyphase);
752 752 if (phaseroots == NULL)
753 753 continue;
754 754 rev = add_roots_get_min(self, phaseroots, phases,
755 755 trackedphases[i]);
756 756 if (rev == -2)
757 757 goto release;
758 758 if (rev != -1 && (minphaserev == -1 || rev < minphaserev))
759 759 minphaserev = rev;
760 760 }
761 761
762 762 for (i = 0; i < numphases; ++i) {
763 763 phasesets[i] = PySet_New(NULL);
764 764 if (phasesets[i] == NULL)
765 765 goto release;
766 766 }
767 767
768 768 if (minphaserev == -1)
769 769 minphaserev = len;
770 770 for (rev = minphaserev; rev < len; ++rev) {
771 771 PyObject *pyphase = NULL;
772 772 PyObject *pyrev = NULL;
773 773 int parents[2];
774 774 /*
775 775 * The parent lookup could be skipped for phaseroots, but
776 776 * phase --force would historically not recompute them
777 777 * correctly, leaving descendents with a lower phase around.
778 778 * As such, unconditionally recompute the phase.
779 779 */
780 780 if (index_get_parents(self, rev, parents, (int)len - 1) < 0)
781 781 goto release;
782 782 set_phase_from_parents(phases, parents[0], parents[1], rev);
783 783 switch (phases[rev]) {
784 784 case 0:
785 785 continue;
786 786 case 1:
787 787 pyphase = phasesets[0];
788 788 break;
789 789 case 2:
790 790 pyphase = phasesets[1];
791 791 break;
792 792 case 32:
793 793 pyphase = phasesets[2];
794 794 break;
795 795 case 96:
796 796 pyphase = phasesets[3];
797 797 break;
798 798 default:
799 799 /* this should never happen since the phase number is
800 800 * specified by this function. */
801 801 PyErr_SetString(PyExc_SystemError,
802 802 "bad phase number in internal list");
803 803 goto release;
804 804 }
805 805 pyrev = PyInt_FromLong(rev);
806 806 if (pyrev == NULL)
807 807 goto release;
808 808 if (PySet_Add(pyphase, pyrev) == -1) {
809 809 Py_DECREF(pyrev);
810 810 goto release;
811 811 }
812 812 Py_DECREF(pyrev);
813 813 }
814 814
815 815 phasesetsdict = _dict_new_presized(numphases);
816 816 if (phasesetsdict == NULL)
817 817 goto release;
818 818 for (i = 0; i < numphases; ++i) {
819 819 PyObject *pyphase = PyInt_FromLong(trackedphases[i]);
820 820 if (pyphase == NULL)
821 821 goto release;
822 822 if (PyDict_SetItem(phasesetsdict, pyphase, phasesets[i]) ==
823 823 -1) {
824 824 Py_DECREF(pyphase);
825 825 goto release;
826 826 }
827 827 Py_DECREF(phasesets[i]);
828 828 phasesets[i] = NULL;
829 829 }
830 830
831 831 return Py_BuildValue("nN", len, phasesetsdict);
832 832
833 833 release:
834 834 for (i = 0; i < numphases; ++i)
835 835 Py_XDECREF(phasesets[i]);
836 836 Py_XDECREF(phasesetsdict);
837 837
838 838 free(phases);
839 839 return NULL;
840 840 }
841 841
/*
 * headrevs([filteredrevs]) -> list of head revisions.
 *
 * A head is a rev with no unfiltered children. The result is cached on
 * the index object (invalidated on append/delete); the cache is reused
 * only while the identical filteredrevs object is passed in.
 */
static PyObject *index_headrevs(indexObject *self, PyObject *args)
{
	Py_ssize_t i, j, len;
	char *nothead = NULL;
	PyObject *heads = NULL;
	PyObject *filter = NULL;
	PyObject *filteredrevs = Py_None;

	if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
		return NULL;
	}

	/* cache hit requires identity (not equality) of the filter set */
	if (self->headrevs && filteredrevs == self->filteredrevs)
		return list_copy(self->headrevs);

	Py_DECREF(self->filteredrevs);
	self->filteredrevs = filteredrevs;
	Py_INCREF(filteredrevs);

	if (filteredrevs != Py_None) {
		filter = PyObject_GetAttrString(filteredrevs, "__contains__");
		if (!filter) {
			PyErr_SetString(
			    PyExc_TypeError,
			    "filteredrevs has no attribute __contains__");
			goto bail;
		}
	}

	len = index_length(self);
	heads = PyList_New(0);
	if (heads == NULL)
		goto bail;
	if (len == 0) {
		/* empty index: nullrev is the only head
		 * (NB: this local shadows the file-level `nullid` array) */
		PyObject *nullid = PyInt_FromLong(-1);
		if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
			Py_XDECREF(nullid);
			goto bail;
		}
		goto done;
	}

	nothead = calloc(len, 1);
	if (nothead == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* walk tip -> 0, marking parents of every unfiltered rev */
	for (i = len - 1; i >= 0; i--) {
		int isfiltered;
		int parents[2];

		/* If nothead[i] == 1, it means we've seen an unfiltered child
		 * of this node already, and therefore this node is not
		 * filtered. So we can skip the expensive check_filter step.
		 */
		if (nothead[i] != 1) {
			isfiltered = check_filter(filter, i);
			if (isfiltered == -1) {
				PyErr_SetString(PyExc_TypeError,
				                "unable to check filter");
				goto bail;
			}

			if (isfiltered) {
				nothead[i] = 1;
				continue;
			}
		}

		if (index_get_parents(self, i, parents, (int)len - 1) < 0)
			goto bail;
		for (j = 0; j < 2; j++) {
			if (parents[j] >= 0)
				nothead[parents[j]] = 1;
		}
	}

	/* whatever was never marked is a head */
	for (i = 0; i < len; i++) {
		PyObject *head;

		if (nothead[i])
			continue;
		head = PyInt_FromSsize_t(i);
		if (head == NULL || PyList_Append(heads, head) == -1) {
			Py_XDECREF(head);
			goto bail;
		}
	}

done:
	/* the cache owns `heads`; hand a defensive copy to the caller */
	self->headrevs = heads;
	Py_XDECREF(filter);
	free(nothead);
	return list_copy(self->headrevs);
bail:
	Py_XDECREF(filter);
	Py_XDECREF(heads);
	free(nothead);
	return NULL;
}
943 943
944 944 /**
945 945 * Obtain the base revision index entry.
946 946 *
947 947 * Callers must ensure that rev >= 0 or illegal memory access may occur.
948 948 */
949 949 static inline int index_baserev(indexObject *self, int rev)
950 950 {
951 951 const char *data;
952 952 int result;
953 953
954 954 data = index_deref(self, rev);
955 955 if (data == NULL)
956 956 return -2;
957 957 result = getbe32(data + 16);
958 958
959 959 if (result > rev) {
960 960 PyErr_Format(
961 961 PyExc_ValueError,
962 962 "corrupted revlog, revision base above revision: %d, %d",
963 963 rev, result);
964 964 return -2;
965 965 }
966 966 if (result < -1) {
967 967 PyErr_Format(
968 968 PyExc_ValueError,
969 969 "corrupted revlog, revision base out of range: %d, %d", rev,
970 970 result);
971 971 return -2;
972 972 }
973 973 return result;
974 974 }
975 975
/**
 * Find if a revision is a snapshot or not
 *
 * Only relevant for sparse-revlog case.
 * Callers must ensure that rev is in a valid range.
 *
 * Returns 1 if rev is a snapshot, 0 if it is a plain delta, and -1 on
 * error (with a Python exception set).
 */
static int index_issnapshotrev(indexObject *self, Py_ssize_t rev)
{
	int ps[2];
	Py_ssize_t base;
	while (rev >= 0) {
		base = (Py_ssize_t)index_baserev(self, rev);
		/* a rev that is its own base delta-chains against nothing;
		 * normalize to -1 (full version) */
		if (base == rev) {
			base = -1;
		}
		if (base == -2) {
			assert(PyErr_Occurred());
			return -1;
		}
		if (base == -1) {
			/* full version or intermediate snapshot */
			return 1;
		}
		if (index_get_parents(self, rev, ps, (int)rev) < 0) {
			assert(PyErr_Occurred());
			return -1;
		};
		/* a delta against one of its own parents is a plain delta,
		 * not a snapshot */
		if (base == ps[0] || base == ps[1]) {
			return 0;
		}
		/* otherwise keep following the delta chain */
		rev = base;
	}
	/* only reached when the function was entered with rev == nullrev */
	return rev == -1;
}
1009 1009
1010 1010 static PyObject *index_issnapshot(indexObject *self, PyObject *value)
1011 1011 {
1012 1012 long rev;
1013 1013 int issnap;
1014 1014 Py_ssize_t length = index_length(self);
1015 1015
1016 1016 if (!pylong_to_long(value, &rev)) {
1017 1017 return NULL;
1018 1018 }
1019 1019 if (rev < -1 || rev >= length) {
1020 1020 PyErr_Format(PyExc_ValueError, "revlog index out of range: %ld",
1021 1021 rev);
1022 1022 return NULL;
1023 1023 };
1024 1024 issnap = index_issnapshotrev(self, (Py_ssize_t)rev);
1025 1025 if (issnap < 0) {
1026 1026 return NULL;
1027 1027 };
1028 1028 return PyBool_FromLong((long)issnap);
1029 1029 }
1030 1030
1031 1031 static PyObject *index_findsnapshots(indexObject *self, PyObject *args)
1032 1032 {
1033 1033 Py_ssize_t start_rev;
1034 1034 PyObject *cache;
1035 1035 Py_ssize_t base;
1036 1036 Py_ssize_t rev;
1037 1037 PyObject *key = NULL;
1038 1038 PyObject *value = NULL;
1039 1039 const Py_ssize_t length = index_length(self);
1040 1040 if (!PyArg_ParseTuple(args, "O!n", &PyDict_Type, &cache, &start_rev)) {
1041 1041 return NULL;
1042 1042 }
1043 1043 for (rev = start_rev; rev < length; rev++) {
1044 1044 int issnap;
1045 1045 PyObject *allvalues = NULL;
1046 1046 issnap = index_issnapshotrev(self, rev);
1047 1047 if (issnap < 0) {
1048 1048 goto bail;
1049 1049 }
1050 1050 if (issnap == 0) {
1051 1051 continue;
1052 1052 }
1053 1053 base = (Py_ssize_t)index_baserev(self, rev);
1054 1054 if (base == rev) {
1055 1055 base = -1;
1056 1056 }
1057 1057 if (base == -2) {
1058 1058 assert(PyErr_Occurred());
1059 1059 goto bail;
1060 1060 }
1061 1061 key = PyInt_FromSsize_t(base);
1062 1062 allvalues = PyDict_GetItem(cache, key);
1063 1063 if (allvalues == NULL && PyErr_Occurred()) {
1064 1064 goto bail;
1065 1065 }
1066 1066 if (allvalues == NULL) {
1067 1067 int r;
1068 1068 allvalues = PyList_New(0);
1069 1069 if (!allvalues) {
1070 1070 goto bail;
1071 1071 }
1072 1072 r = PyDict_SetItem(cache, key, allvalues);
1073 1073 Py_DECREF(allvalues);
1074 1074 if (r < 0) {
1075 1075 goto bail;
1076 1076 }
1077 1077 }
1078 1078 value = PyInt_FromSsize_t(rev);
1079 1079 if (PyList_Append(allvalues, value)) {
1080 1080 goto bail;
1081 1081 }
1082 1082 Py_CLEAR(key);
1083 1083 Py_CLEAR(value);
1084 1084 }
1085 1085 Py_RETURN_NONE;
1086 1086 bail:
1087 1087 Py_XDECREF(key);
1088 1088 Py_XDECREF(value);
1089 1089 return NULL;
1090 1090 }
1091 1091
1092 1092 static PyObject *index_deltachain(indexObject *self, PyObject *args)
1093 1093 {
1094 1094 int rev, generaldelta;
1095 1095 PyObject *stoparg;
1096 1096 int stoprev, iterrev, baserev = -1;
1097 1097 int stopped;
1098 1098 PyObject *chain = NULL, *result = NULL;
1099 1099 const Py_ssize_t length = index_length(self);
1100 1100
1101 1101 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
1102 1102 return NULL;
1103 1103 }
1104 1104
1105 1105 if (PyInt_Check(stoparg)) {
1106 1106 stoprev = (int)PyInt_AsLong(stoparg);
1107 1107 if (stoprev == -1 && PyErr_Occurred()) {
1108 1108 return NULL;
1109 1109 }
1110 1110 } else if (stoparg == Py_None) {
1111 1111 stoprev = -2;
1112 1112 } else {
1113 1113 PyErr_SetString(PyExc_ValueError,
1114 1114 "stoprev must be integer or None");
1115 1115 return NULL;
1116 1116 }
1117 1117
1118 1118 if (rev < 0 || rev >= length) {
1119 1119 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1120 1120 return NULL;
1121 1121 }
1122 1122
1123 1123 chain = PyList_New(0);
1124 1124 if (chain == NULL) {
1125 1125 return NULL;
1126 1126 }
1127 1127
1128 1128 baserev = index_baserev(self, rev);
1129 1129
1130 1130 /* This should never happen. */
1131 1131 if (baserev <= -2) {
1132 1132 /* Error should be set by index_deref() */
1133 1133 assert(PyErr_Occurred());
1134 1134 goto bail;
1135 1135 }
1136 1136
1137 1137 iterrev = rev;
1138 1138
1139 1139 while (iterrev != baserev && iterrev != stoprev) {
1140 1140 PyObject *value = PyInt_FromLong(iterrev);
1141 1141 if (value == NULL) {
1142 1142 goto bail;
1143 1143 }
1144 1144 if (PyList_Append(chain, value)) {
1145 1145 Py_DECREF(value);
1146 1146 goto bail;
1147 1147 }
1148 1148 Py_DECREF(value);
1149 1149
1150 1150 if (generaldelta) {
1151 1151 iterrev = baserev;
1152 1152 } else {
1153 1153 iterrev--;
1154 1154 }
1155 1155
1156 1156 if (iterrev < 0) {
1157 1157 break;
1158 1158 }
1159 1159
1160 1160 if (iterrev >= length) {
1161 1161 PyErr_SetString(PyExc_IndexError,
1162 1162 "revision outside index");
1163 1163 return NULL;
1164 1164 }
1165 1165
1166 1166 baserev = index_baserev(self, iterrev);
1167 1167
1168 1168 /* This should never happen. */
1169 1169 if (baserev <= -2) {
1170 1170 /* Error should be set by index_deref() */
1171 1171 assert(PyErr_Occurred());
1172 1172 goto bail;
1173 1173 }
1174 1174 }
1175 1175
1176 1176 if (iterrev == stoprev) {
1177 1177 stopped = 1;
1178 1178 } else {
1179 1179 PyObject *value = PyInt_FromLong(iterrev);
1180 1180 if (value == NULL) {
1181 1181 goto bail;
1182 1182 }
1183 1183 if (PyList_Append(chain, value)) {
1184 1184 Py_DECREF(value);
1185 1185 goto bail;
1186 1186 }
1187 1187 Py_DECREF(value);
1188 1188
1189 1189 stopped = 0;
1190 1190 }
1191 1191
1192 1192 if (PyList_Reverse(chain)) {
1193 1193 goto bail;
1194 1194 }
1195 1195
1196 1196 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
1197 1197 Py_DECREF(chain);
1198 1198 return result;
1199 1199
1200 1200 bail:
1201 1201 Py_DECREF(chain);
1202 1202 return NULL;
1203 1203 }
1204 1204
1205 1205 static inline int64_t
1206 1206 index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
1207 1207 {
1208 1208 int64_t start_offset;
1209 1209 int64_t end_offset;
1210 1210 int end_size;
1211 1211 start_offset = index_get_start(self, start_rev);
1212 1212 if (start_offset < 0) {
1213 1213 return -1;
1214 1214 }
1215 1215 end_offset = index_get_start(self, end_rev);
1216 1216 if (end_offset < 0) {
1217 1217 return -1;
1218 1218 }
1219 1219 end_size = index_get_length(self, end_rev);
1220 1220 if (end_size < 0) {
1221 1221 return -1;
1222 1222 }
1223 1223 if (end_offset < start_offset) {
1224 1224 PyErr_Format(PyExc_ValueError,
1225 1225 "corrupted revlog index: inconsistent offset "
1226 1226 "between revisions (%zd) and (%zd)",
1227 1227 start_rev, end_rev);
1228 1228 return -1;
1229 1229 }
1230 1230 return (end_offset - start_offset) + (int64_t)end_size;
1231 1231 }
1232 1232
1233 1233 /* returns endidx so that revs[startidx:endidx] has no empty trailing revs */
1234 1234 static Py_ssize_t trim_endidx(indexObject *self, const Py_ssize_t *revs,
1235 1235 Py_ssize_t startidx, Py_ssize_t endidx)
1236 1236 {
1237 1237 int length;
1238 1238 while (endidx > 1 && endidx > startidx) {
1239 1239 length = index_get_length(self, revs[endidx - 1]);
1240 1240 if (length < 0) {
1241 1241 return -1;
1242 1242 }
1243 1243 if (length != 0) {
1244 1244 break;
1245 1245 }
1246 1246 endidx -= 1;
1247 1247 }
1248 1248 return endidx;
1249 1249 }
1250 1250
1251 1251 struct Gap {
1252 1252 int64_t size;
1253 1253 Py_ssize_t idx;
1254 1254 };
1255 1255
1256 1256 static int gap_compare(const void *left, const void *right)
1257 1257 {
1258 1258 const struct Gap *l_left = ((const struct Gap *)left);
1259 1259 const struct Gap *l_right = ((const struct Gap *)right);
1260 1260 if (l_left->size < l_right->size) {
1261 1261 return -1;
1262 1262 } else if (l_left->size > l_right->size) {
1263 1263 return 1;
1264 1264 }
1265 1265 return 0;
1266 1266 }
1267 1267 static int Py_ssize_t_compare(const void *left, const void *right)
1268 1268 {
1269 1269 const Py_ssize_t l_left = *(const Py_ssize_t *)left;
1270 1270 const Py_ssize_t l_right = *(const Py_ssize_t *)right;
1271 1271 if (l_left < l_right) {
1272 1272 return -1;
1273 1273 } else if (l_left > l_right) {
1274 1274 return 1;
1275 1275 }
1276 1276 return 0;
1277 1277 }
1278 1278
/*
 * index.slicechunktodensity(revs, targetdensity, mingapsize): slice a
 * delta chain into chunks that are dense enough to read efficiently.
 *
 * Returns either a 1-tuple containing the original list (no slicing
 * needed) or a list of sub-lists of revisions; reading each sub-list
 * as one contiguous I/O keeps the ratio of useful payload to bytes
 * read at or above targetdensity, skipping gaps larger than
 * mingapsize.
 */
static PyObject *index_slicechunktodensity(indexObject *self, PyObject *args)
{
	/* method arguments */
	PyObject *list_revs = NULL; /* revisions in the chain */
	double targetdensity = 0; /* min density to achieve */
	Py_ssize_t mingapsize = 0; /* threshold to ignore gaps */

	/* other core variables */
	Py_ssize_t idxlen = index_length(self);
	Py_ssize_t i; /* used for various iteration */
	PyObject *result = NULL; /* the final return of the function */

	/* generic information about the delta chain being sliced */
	Py_ssize_t num_revs = 0; /* size of the full delta chain */
	Py_ssize_t *revs = NULL; /* native array of revision in the chain */
	int64_t chainpayload = 0; /* sum of all delta in the chain */
	int64_t deltachainspan = 0; /* distance from first byte to last byte */

	/* variable used for slicing the delta chain */
	int64_t readdata = 0; /* amount of data currently planned to be read */
	double density = 0; /* ratio of payload data compared to read ones */
	int64_t previous_end;
	struct Gap *gaps = NULL; /* array of notable gap in the chain */
	Py_ssize_t num_gaps =
	    0; /* total number of notable gap recorded so far */
	Py_ssize_t *selected_indices = NULL; /* indices of gap skipped over */
	Py_ssize_t num_selected = 0; /* number of gaps skipped */
	PyObject *chunk = NULL; /* individual slice */
	PyObject *allchunks = NULL; /* all slices */
	Py_ssize_t previdx;

	/* parsing argument */
	if (!PyArg_ParseTuple(args, "O!dn", &PyList_Type, &list_revs,
	                      &targetdensity, &mingapsize)) {
		goto bail;
	}

	/* If the delta chain contains a single element, we do not need slicing
	 */
	num_revs = PyList_GET_SIZE(list_revs);
	if (num_revs <= 1) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* Turn the python list into a native integer array (for efficiency) */
	revs = (Py_ssize_t *)calloc(num_revs, sizeof(Py_ssize_t));
	if (revs == NULL) {
		PyErr_NoMemory();
		goto bail;
	}
	for (i = 0; i < num_revs; i++) {
		Py_ssize_t revnum = PyInt_AsLong(PyList_GET_ITEM(list_revs, i));
		if (revnum == -1 && PyErr_Occurred()) {
			goto bail;
		}
		if (revnum < nullrev || revnum >= idxlen) {
			PyErr_Format(PyExc_IndexError,
			             "index out of range: %zd", revnum);
			goto bail;
		}
		revs[i] = revnum;
	}

	/* Compute and check various properties of the unsliced delta chain */
	deltachainspan = index_segment_span(self, revs[0], revs[num_revs - 1]);
	if (deltachainspan < 0) {
		goto bail;
	}

	/* a span no larger than the gap threshold can never need slicing */
	if (deltachainspan <= mingapsize) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	chainpayload = 0;
	for (i = 0; i < num_revs; i++) {
		int tmp = index_get_length(self, revs[i]);
		if (tmp < 0) {
			goto bail;
		}
		chainpayload += tmp;
	}

	readdata = deltachainspan;
	density = 1.0;

	if (0 < deltachainspan) {
		density = (double)chainpayload / (double)deltachainspan;
	}

	/* dense enough already: return the chain as a single slice */
	if (density >= targetdensity) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* if chain is too sparse, look for relevant gaps */
	gaps = (struct Gap *)calloc(num_revs, sizeof(struct Gap));
	if (gaps == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	previous_end = -1;
	for (i = 0; i < num_revs; i++) {
		int64_t revstart;
		int revsize;
		revstart = index_get_start(self, revs[i]);
		if (revstart < 0) {
			goto bail;
		};
		revsize = index_get_length(self, revs[i]);
		if (revsize < 0) {
			goto bail;
		};
		/* empty revisions do not delimit gaps */
		if (revsize == 0) {
			continue;
		}
		if (previous_end >= 0) {
			int64_t gapsize = revstart - previous_end;
			if (gapsize > mingapsize) {
				gaps[num_gaps].size = gapsize;
				gaps[num_gaps].idx = i;
				num_gaps += 1;
			}
		}
		previous_end = revstart + revsize;
	}
	if (num_gaps == 0) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	qsort(gaps, num_gaps, sizeof(struct Gap), &gap_compare);

	/* Slice the largest gap first, they improve the density the most */
	/* +1 slot for the sentinel index appended below */
	selected_indices =
	    (Py_ssize_t *)malloc((num_gaps + 1) * sizeof(Py_ssize_t));
	if (selected_indices == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	for (i = num_gaps - 1; i >= 0; i--) {
		selected_indices[num_selected] = gaps[i].idx;
		readdata -= gaps[i].size;
		num_selected += 1;
		if (readdata <= 0) {
			density = 1.0;
		} else {
			density = (double)chainpayload / (double)readdata;
		}
		if (density >= targetdensity) {
			break;
		}
	}
	/* restore chain order of the chosen cut points */
	qsort(selected_indices, num_selected, sizeof(Py_ssize_t),
	      &Py_ssize_t_compare);

	/* create the resulting slice */
	allchunks = PyList_New(0);
	if (allchunks == NULL) {
		goto bail;
	}
	previdx = 0;
	selected_indices[num_selected] = num_revs;
	for (i = 0; i <= num_selected; i++) {
		Py_ssize_t idx = selected_indices[i];
		Py_ssize_t endidx = trim_endidx(self, revs, previdx, idx);
		if (endidx < 0) {
			goto bail;
		}
		if (previdx < endidx) {
			chunk = PyList_GetSlice(list_revs, previdx, endidx);
			if (chunk == NULL) {
				goto bail;
			}
			if (PyList_Append(allchunks, chunk) == -1) {
				goto bail;
			}
			Py_DECREF(chunk);
			chunk = NULL;
		}
		previdx = idx;
	}
	result = allchunks;
	goto done;

bail:
	Py_XDECREF(allchunks);
	Py_XDECREF(chunk);
done:
	free(revs);
	free(gaps);
	free(selected_indices);
	return result;
}
1474 1474
1475 1475 static inline int nt_level(const char *node, Py_ssize_t level)
1476 1476 {
1477 1477 int v = node[level >> 1];
1478 1478 if (!(level & 1))
1479 1479 v >>= 4;
1480 1480 return v & 0xf;
1481 1481 }
1482 1482
1483 1483 /*
1484 1484 * Return values:
1485 1485 *
1486 1486 * -4: match is ambiguous (multiple candidates)
1487 1487 * -2: not found
1488 1488 * rest: valid rev
1489 1489 */
1490 1490 static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
1491 1491 int hex)
1492 1492 {
1493 1493 int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
1494 1494 int level, maxlevel, off;
1495 1495
1496 1496 if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
1497 1497 return -1;
1498 1498
1499 1499 if (hex)
1500 1500 maxlevel = nodelen > 40 ? 40 : (int)nodelen;
1501 1501 else
1502 1502 maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);
1503 1503
1504 1504 for (level = off = 0; level < maxlevel; level++) {
1505 1505 int k = getnybble(node, level);
1506 1506 nodetreenode *n = &self->nodes[off];
1507 1507 int v = n->children[k];
1508 1508
1509 1509 if (v < 0) {
1510 1510 const char *n;
1511 1511 Py_ssize_t i;
1512 1512
1513 1513 v = -(v + 2);
1514 1514 n = index_node(self->index, v);
1515 1515 if (n == NULL)
1516 1516 return -2;
1517 1517 for (i = level; i < maxlevel; i++)
1518 1518 if (getnybble(node, i) != nt_level(n, i))
1519 1519 return -2;
1520 1520 return v;
1521 1521 }
1522 1522 if (v == 0)
1523 1523 return -2;
1524 1524 off = v;
1525 1525 }
1526 1526 /* multiple matches against an ambiguous prefix */
1527 1527 return -4;
1528 1528 }
1529 1529
1530 1530 static int nt_new(nodetree *self)
1531 1531 {
1532 1532 if (self->length == self->capacity) {
1533 1533 unsigned newcapacity;
1534 1534 nodetreenode *newnodes;
1535 1535 newcapacity = self->capacity * 2;
1536 1536 if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
1537 1537 PyErr_SetString(PyExc_MemoryError,
1538 1538 "overflow in nt_new");
1539 1539 return -1;
1540 1540 }
1541 1541 newnodes =
1542 1542 realloc(self->nodes, newcapacity * sizeof(nodetreenode));
1543 1543 if (newnodes == NULL) {
1544 1544 PyErr_SetString(PyExc_MemoryError, "out of memory");
1545 1545 return -1;
1546 1546 }
1547 1547 self->capacity = newcapacity;
1548 1548 self->nodes = newnodes;
1549 1549 memset(&self->nodes[self->length], 0,
1550 1550 sizeof(nodetreenode) * (self->capacity - self->length));
1551 1551 }
1552 1552 return self->length++;
1553 1553 }
1554 1554
/*
 * Insert a node -> rev mapping into the trie.
 *
 * When an occupied leaf holds a different node, the old leaf is pushed
 * one level down repeatedly until the two node IDs diverge. Passing
 * rev == -2 encodes removal (see nt_delete_node). Returns 0 on
 * success, -1 on error (exception set).
 */
static int nt_insert(nodetree *self, const char *node, int rev)
{
	int level = 0;
	int off = 0;

	while (level < 40) {
		int k = nt_level(node, level);
		nodetreenode *n;
		int v;

		n = &self->nodes[off];
		v = n->children[k];

		if (v == 0) {
			/* empty slot: store the leaf (encoded as -rev - 2) */
			n->children[k] = -rev - 2;
			return 0;
		}
		if (v < 0) {
			/* slot holds a leaf: either the same node (update in
			 * place) or we must split */
			const char *oldnode =
			    index_node_existing(self->index, -(v + 2));
			int noff;

			if (oldnode == NULL)
				return -1;
			if (!memcmp(oldnode, node, 20)) {
				n->children[k] = -rev - 2;
				return 0;
			}
			noff = nt_new(self);
			if (noff == -1)
				return -1;
			/* self->nodes may have been changed by realloc */
			self->nodes[off].children[k] = noff;
			off = noff;
			n = &self->nodes[off];
			n->children[nt_level(oldnode, ++level)] = v;
			if (level > self->depth)
				self->depth = level;
			self->splits += 1;
		} else {
			/* interior node: descend one level */
			level += 1;
			off = v;
		}
	}

	return -1;
}
1602 1602
1603 1603 static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
1604 1604 {
1605 1605 Py_ssize_t rev;
1606 1606 const char *node;
1607 1607 Py_ssize_t length;
1608 1608 if (!PyArg_ParseTuple(args, "n", &rev))
1609 1609 return NULL;
1610 1610 length = index_length(self->nt.index);
1611 1611 if (rev < 0 || rev >= length) {
1612 1612 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1613 1613 return NULL;
1614 1614 }
1615 1615 node = index_node_existing(self->nt.index, rev);
1616 1616 if (nt_insert(&self->nt, node, (int)rev) == -1)
1617 1617 return NULL;
1618 1618 Py_RETURN_NONE;
1619 1619 }
1620 1620
/*
 * Remove a node from the trie by overwriting its leaf. Returns 0 on
 * success, -1 on error (exception set).
 */
static int nt_delete_node(nodetree *self, const char *node)
{
	/* rev==-2 happens to get encoded as 0, which is interpreted as not set
	 */
	return nt_insert(self, node, -2);
}
1627 1627
1628 1628 static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
1629 1629 {
1630 1630 /* Initialize before overflow-checking to avoid nt_dealloc() crash. */
1631 1631 self->nodes = NULL;
1632 1632
1633 1633 self->index = index;
1634 1634 /* The input capacity is in terms of revisions, while the field is in
1635 1635 * terms of nodetree nodes. */
1636 1636 self->capacity = (capacity < 4 ? 4 : capacity / 2);
1637 1637 self->depth = 0;
1638 1638 self->splits = 0;
1639 1639 if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
1640 1640 PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
1641 1641 return -1;
1642 1642 }
1643 1643 self->nodes = calloc(self->capacity, sizeof(nodetreenode));
1644 1644 if (self->nodes == NULL) {
1645 1645 PyErr_NoMemory();
1646 1646 return -1;
1647 1647 }
1648 1648 self->length = 1;
1649 1649 return 0;
1650 1650 }
1651 1651
1652 1652 static int ntobj_init(nodetreeObject *self, PyObject *args)
1653 1653 {
1654 1654 PyObject *index;
1655 1655 unsigned capacity;
1656 1656 if (!PyArg_ParseTuple(args, "O!I", &HgRevlogIndex_Type, &index,
1657 1657 &capacity))
1658 1658 return -1;
1659 1659 Py_INCREF(index);
1660 1660 return nt_init(&self->nt, (indexObject *)index, capacity);
1661 1661 }
1662 1662
/*
 * Look up a hex node prefix in the trie; same return values as
 * nt_find() (-4 ambiguous, -2 not found, otherwise the revision).
 */
static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
{
	return nt_find(self, node, nodelen, 1);
}
1667 1667
1668 1668 /*
1669 1669 * Find the length of the shortest unique prefix of node.
1670 1670 *
1671 1671 * Return values:
1672 1672 *
1673 1673 * -3: error (exception set)
1674 1674 * -2: not found (no exception set)
1675 1675 * rest: length of shortest prefix
1676 1676 */
1677 1677 static int nt_shortest(nodetree *self, const char *node)
1678 1678 {
1679 1679 int level, off;
1680 1680
1681 1681 for (level = off = 0; level < 40; level++) {
1682 1682 int k, v;
1683 1683 nodetreenode *n = &self->nodes[off];
1684 1684 k = nt_level(node, level);
1685 1685 v = n->children[k];
1686 1686 if (v < 0) {
1687 1687 const char *n;
1688 1688 v = -(v + 2);
1689 1689 n = index_node_existing(self->index, v);
1690 1690 if (n == NULL)
1691 1691 return -3;
1692 1692 if (memcmp(node, n, 20) != 0)
1693 1693 /*
1694 1694 * Found a unique prefix, but it wasn't for the
1695 1695 * requested node (i.e the requested node does
1696 1696 * not exist).
1697 1697 */
1698 1698 return -2;
1699 1699 return level + 1;
1700 1700 }
1701 1701 if (v == 0)
1702 1702 return -2;
1703 1703 off = v;
1704 1704 }
1705 1705 /*
1706 1706 * The node was still not unique after 40 hex digits, so this won't
1707 1707 * happen. Also, if we get here, then there's a programming error in
1708 1708 * this file that made us insert a node longer than 40 hex digits.
1709 1709 */
1710 1710 PyErr_SetString(PyExc_Exception, "broken node tree");
1711 1711 return -3;
1712 1712 }
1713 1713
1714 1714 static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
1715 1715 {
1716 1716 PyObject *val;
1717 1717 char *node;
1718 1718 int length;
1719 1719
1720 1720 if (!PyArg_ParseTuple(args, "O", &val))
1721 1721 return NULL;
1722 1722 if (node_check(val, &node) == -1)
1723 1723 return NULL;
1724 1724
1725 1725 length = nt_shortest(&self->nt, node);
1726 1726 if (length == -3)
1727 1727 return NULL;
1728 1728 if (length == -2) {
1729 1729 raise_revlog_error();
1730 1730 return NULL;
1731 1731 }
1732 1732 return PyInt_FromLong(length);
1733 1733 }
1734 1734
/*
 * Release the trie's backing storage. Clearing the pointer makes the
 * function safe to call more than once.
 */
static void nt_dealloc(nodetree *self)
{
	free(self->nodes);
	self->nodes = NULL;
}
1740 1740
/* Destructor for the Python-level nodetree wrapper: drop the strong
 * reference to the index taken in ntobj_init(), free the trie, then
 * the object itself. */
static void ntobj_dealloc(nodetreeObject *self)
{
	Py_XDECREF(self->nt.index);
	nt_dealloc(&self->nt);
	PyObject_Del(self);
}
1747 1747
/* Methods exposed on parsers.nodetree instances. */
static PyMethodDef ntobj_methods[] = {
    {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
     "insert an index entry"},
    {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
     "find length of shortest hex nodeid of a binary ID"},
    {NULL} /* Sentinel */
};
1755 1755
/* Python type object for parsers.nodetree (see the ntobj_* functions
 * above for the slot implementations). */
static PyTypeObject nodetreeType = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.nodetree",            /* tp_name */
    sizeof(nodetreeObject),        /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)ntobj_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    0,                             /* tp_as_sequence */
    0,                             /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "nodetree",                    /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    ntobj_methods,                 /* tp_methods */
    0,                             /* tp_members */
    0,                             /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)ntobj_init,          /* tp_init */
    0,                             /* tp_alloc */
};
1795 1795
1796 1796 static int index_init_nt(indexObject *self)
1797 1797 {
1798 1798 if (!self->ntinitialized) {
1799 1799 if (nt_init(&self->nt, self, (int)self->length) == -1) {
1800 1800 nt_dealloc(&self->nt);
1801 1801 return -1;
1802 1802 }
1803 1803 if (nt_insert(&self->nt, nullid, -1) == -1) {
1804 1804 nt_dealloc(&self->nt);
1805 1805 return -1;
1806 1806 }
1807 1807 self->ntinitialized = 1;
1808 1808 self->ntrev = (int)index_length(self);
1809 1809 self->ntlookups = 1;
1810 1810 self->ntmisses = 0;
1811 1811 }
1812 1812 return 0;
1813 1813 }
1814 1814
1815 1815 /*
1816 1816 * Return values:
1817 1817 *
1818 1818 * -3: error (exception set)
1819 1819 * -2: not found (no exception set)
1820 1820 * rest: valid rev
1821 1821 */
1822 1822 static int index_find_node(indexObject *self, const char *node,
1823 1823 Py_ssize_t nodelen)
1824 1824 {
1825 1825 int rev;
1826 1826
1827 1827 if (index_init_nt(self) == -1)
1828 1828 return -3;
1829 1829
1830 1830 self->ntlookups++;
1831 1831 rev = nt_find(&self->nt, node, nodelen, 0);
1832 1832 if (rev >= -1)
1833 1833 return rev;
1834 1834
1835 1835 /*
1836 1836 * For the first handful of lookups, we scan the entire index,
1837 1837 * and cache only the matching nodes. This optimizes for cases
1838 1838 * like "hg tip", where only a few nodes are accessed.
1839 1839 *
1840 1840 * After that, we cache every node we visit, using a single
1841 1841 * scan amortized over multiple lookups. This gives the best
1842 1842 * bulk performance, e.g. for "hg log".
1843 1843 */
1844 1844 if (self->ntmisses++ < 4) {
1845 1845 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1846 1846 const char *n = index_node_existing(self, rev);
1847 1847 if (n == NULL)
1848 1848 return -3;
1849 1849 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1850 1850 if (nt_insert(&self->nt, n, rev) == -1)
1851 1851 return -3;
1852 1852 break;
1853 1853 }
1854 1854 }
1855 1855 } else {
1856 1856 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1857 1857 const char *n = index_node_existing(self, rev);
1858 1858 if (n == NULL)
1859 1859 return -3;
1860 1860 if (nt_insert(&self->nt, n, rev) == -1) {
1861 1861 self->ntrev = rev + 1;
1862 1862 return -3;
1863 1863 }
1864 1864 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1865 1865 break;
1866 1866 }
1867 1867 }
1868 1868 self->ntrev = rev;
1869 1869 }
1870 1870
1871 1871 if (rev >= 0)
1872 1872 return rev;
1873 1873 return -2;
1874 1874 }
1875 1875
1876 1876 static PyObject *index_getitem(indexObject *self, PyObject *value)
1877 1877 {
1878 1878 char *node;
1879 1879 int rev;
1880 1880
1881 1881 if (PyInt_Check(value)) {
1882 1882 long idx;
1883 1883 if (!pylong_to_long(value, &idx)) {
1884 1884 return NULL;
1885 1885 }
1886 1886 return index_get(self, idx);
1887 1887 }
1888 1888
1889 1889 if (node_check(value, &node) == -1)
1890 1890 return NULL;
1891 1891 rev = index_find_node(self, node, 20);
1892 1892 if (rev >= -1)
1893 1893 return PyInt_FromLong(rev);
1894 1894 if (rev == -2)
1895 1895 raise_revlog_error();
1896 1896 return NULL;
1897 1897 }
1898 1898
1899 1899 /*
1900 1900 * Fully populate the radix tree.
1901 1901 */
1902 1902 static int index_populate_nt(indexObject *self)
1903 1903 {
1904 1904 int rev;
1905 1905 if (self->ntrev > 0) {
1906 1906 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1907 1907 const char *n = index_node_existing(self, rev);
1908 1908 if (n == NULL)
1909 1909 return -1;
1910 1910 if (nt_insert(&self->nt, n, rev) == -1)
1911 1911 return -1;
1912 1912 }
1913 1913 self->ntrev = -1;
1914 1914 }
1915 1915 return 0;
1916 1916 }
1917 1917
1918 1918 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
1919 1919 {
1920 1920 const char *fullnode;
1921 1921 Py_ssize_t nodelen;
1922 1922 char *node;
1923 1923 int rev, i;
1924 1924
1925 1925 if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
1926 1926 return NULL;
1927 1927
1928 1928 if (nodelen < 1) {
1929 1929 PyErr_SetString(PyExc_ValueError, "key too short");
1930 1930 return NULL;
1931 1931 }
1932 1932
1933 1933 if (nodelen > 40) {
1934 1934 PyErr_SetString(PyExc_ValueError, "key too long");
1935 1935 return NULL;
1936 1936 }
1937 1937
1938 1938 for (i = 0; i < nodelen; i++)
1939 1939 hexdigit(node, i);
1940 1940 if (PyErr_Occurred()) {
1941 1941 /* input contains non-hex characters */
1942 1942 PyErr_Clear();
1943 1943 Py_RETURN_NONE;
1944 1944 }
1945 1945
1946 1946 if (index_init_nt(self) == -1)
1947 1947 return NULL;
1948 1948 if (index_populate_nt(self) == -1)
1949 1949 return NULL;
1950 1950 rev = nt_partialmatch(&self->nt, node, nodelen);
1951 1951
1952 1952 switch (rev) {
1953 1953 case -4:
1954 1954 raise_revlog_error();
1955 1955 return NULL;
1956 1956 case -2:
1957 1957 Py_RETURN_NONE;
1958 1958 case -1:
1959 1959 return PyBytes_FromStringAndSize(nullid, 20);
1960 1960 }
1961 1961
1962 1962 fullnode = index_node_existing(self, rev);
1963 1963 if (fullnode == NULL) {
1964 1964 return NULL;
1965 1965 }
1966 1966 return PyBytes_FromStringAndSize(fullnode, 20);
1967 1967 }
1968 1968
1969 1969 static PyObject *index_shortest(indexObject *self, PyObject *args)
1970 1970 {
1971 1971 PyObject *val;
1972 1972 char *node;
1973 1973 int length;
1974 1974
1975 1975 if (!PyArg_ParseTuple(args, "O", &val))
1976 1976 return NULL;
1977 1977 if (node_check(val, &node) == -1)
1978 1978 return NULL;
1979 1979
1980 1980 self->ntlookups++;
1981 1981 if (index_init_nt(self) == -1)
1982 1982 return NULL;
1983 1983 if (index_populate_nt(self) == -1)
1984 1984 return NULL;
1985 1985 length = nt_shortest(&self->nt, node);
1986 1986 if (length == -3)
1987 1987 return NULL;
1988 1988 if (length == -2) {
1989 1989 raise_revlog_error();
1990 1990 return NULL;
1991 1991 }
1992 1992 return PyInt_FromLong(length);
1993 1993 }
1994 1994
1995 1995 static PyObject *index_m_get(indexObject *self, PyObject *args)
1996 1996 {
1997 1997 PyObject *val;
1998 1998 char *node;
1999 1999 int rev;
2000 2000
2001 2001 if (!PyArg_ParseTuple(args, "O", &val))
2002 2002 return NULL;
2003 2003 if (node_check(val, &node) == -1)
2004 2004 return NULL;
2005 2005 rev = index_find_node(self, node, 20);
2006 2006 if (rev == -3)
2007 2007 return NULL;
2008 2008 if (rev == -2)
2009 2009 Py_RETURN_NONE;
2010 2010 return PyInt_FromLong(rev);
2011 2011 }
2012 2012
2013 2013 static int index_contains(indexObject *self, PyObject *value)
2014 2014 {
2015 2015 char *node;
2016 2016
2017 2017 if (PyInt_Check(value)) {
2018 2018 long rev;
2019 2019 if (!pylong_to_long(value, &rev)) {
2020 2020 return -1;
2021 2021 }
2022 2022 return rev >= -1 && rev < index_length(self);
2023 2023 }
2024 2024
2025 2025 if (node_check(value, &node) == -1)
2026 2026 return -1;
2027 2027
2028 2028 switch (index_find_node(self, node, 20)) {
2029 2029 case -3:
2030 2030 return -1;
2031 2031 case -2:
2032 2032 return 0;
2033 2033 default:
2034 2034 return 1;
2035 2035 }
2036 2036 }
2037 2037
2038 2038 static PyObject *index_m_has_node(indexObject *self, PyObject *args)
2039 2039 {
2040 2040 int ret = index_contains(self, args);
2041 2041 if (ret < 0)
2042 2042 return NULL;
2043 2043 return PyBool_FromLong((long)ret);
2044 2044 }
2045 2045
2046 2046 static PyObject *index_m_rev(indexObject *self, PyObject *val)
2047 2047 {
2048 2048 char *node;
2049 2049 int rev;
2050 2050
2051 2051 if (node_check(val, &node) == -1)
2052 2052 return NULL;
2053 2053 rev = index_find_node(self, node, 20);
2054 2054 if (rev >= -1)
2055 2055 return PyInt_FromLong(rev);
2056 2056 if (rev == -2)
2057 2057 raise_revlog_error();
2058 2058 return NULL;
2059 2059 }
2060 2060
2061 2061 typedef uint64_t bitmask;
2062 2062
2063 2063 /*
2064 2064 * Given a disjoint set of revs, return all candidates for the
2065 2065 * greatest common ancestor. In revset notation, this is the set
2066 2066 * "heads(::a and ::b and ...)"
2067 2067 */
2068 2068 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
2069 2069 int revcount)
2070 2070 {
2071 2071 const bitmask allseen = (1ull << revcount) - 1;
2072 2072 const bitmask poison = 1ull << revcount;
2073 2073 PyObject *gca = PyList_New(0);
2074 2074 int i, v, interesting;
2075 2075 int maxrev = -1;
2076 2076 bitmask sp;
2077 2077 bitmask *seen;
2078 2078
2079 2079 if (gca == NULL)
2080 2080 return PyErr_NoMemory();
2081 2081
2082 2082 for (i = 0; i < revcount; i++) {
2083 2083 if (revs[i] > maxrev)
2084 2084 maxrev = revs[i];
2085 2085 }
2086 2086
2087 2087 seen = calloc(sizeof(*seen), maxrev + 1);
2088 2088 if (seen == NULL) {
2089 2089 Py_DECREF(gca);
2090 2090 return PyErr_NoMemory();
2091 2091 }
2092 2092
2093 2093 for (i = 0; i < revcount; i++)
2094 2094 seen[revs[i]] = 1ull << i;
2095 2095
2096 2096 interesting = revcount;
2097 2097
2098 2098 for (v = maxrev; v >= 0 && interesting; v--) {
2099 2099 bitmask sv = seen[v];
2100 2100 int parents[2];
2101 2101
2102 2102 if (!sv)
2103 2103 continue;
2104 2104
2105 2105 if (sv < poison) {
2106 2106 interesting -= 1;
2107 2107 if (sv == allseen) {
2108 2108 PyObject *obj = PyInt_FromLong(v);
2109 2109 if (obj == NULL)
2110 2110 goto bail;
2111 2111 if (PyList_Append(gca, obj) == -1) {
2112 2112 Py_DECREF(obj);
2113 2113 goto bail;
2114 2114 }
2115 2115 sv |= poison;
2116 2116 for (i = 0; i < revcount; i++) {
2117 2117 if (revs[i] == v)
2118 2118 goto done;
2119 2119 }
2120 2120 }
2121 2121 }
2122 2122 if (index_get_parents(self, v, parents, maxrev) < 0)
2123 2123 goto bail;
2124 2124
2125 2125 for (i = 0; i < 2; i++) {
2126 2126 int p = parents[i];
2127 2127 if (p == -1)
2128 2128 continue;
2129 2129 sp = seen[p];
2130 2130 if (sv < poison) {
2131 2131 if (sp == 0) {
2132 2132 seen[p] = sv;
2133 2133 interesting++;
2134 2134 } else if (sp != sv)
2135 2135 seen[p] |= sv;
2136 2136 } else {
2137 2137 if (sp && sp < poison)
2138 2138 interesting--;
2139 2139 seen[p] = sv;
2140 2140 }
2141 2141 }
2142 2142 }
2143 2143
2144 2144 done:
2145 2145 free(seen);
2146 2146 return gca;
2147 2147 bail:
2148 2148 free(seen);
2149 2149 Py_XDECREF(gca);
2150 2150 return NULL;
2151 2151 }
2152 2152
2153 2153 /*
2154 2154 * Given a disjoint set of revs, return the subset with the longest
2155 2155 * path to the root.
2156 2156 */
2157 2157 static PyObject *find_deepest(indexObject *self, PyObject *revs)
2158 2158 {
2159 2159 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
2160 2160 static const Py_ssize_t capacity = 24;
2161 2161 int *depth, *interesting = NULL;
2162 2162 int i, j, v, ninteresting;
2163 2163 PyObject *dict = NULL, *keys = NULL;
2164 2164 long *seen = NULL;
2165 2165 int maxrev = -1;
2166 2166 long final;
2167 2167
2168 2168 if (revcount > capacity) {
2169 2169 PyErr_Format(PyExc_OverflowError,
2170 2170 "bitset size (%ld) > capacity (%ld)",
2171 2171 (long)revcount, (long)capacity);
2172 2172 return NULL;
2173 2173 }
2174 2174
2175 2175 for (i = 0; i < revcount; i++) {
2176 2176 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2177 2177 if (n > maxrev)
2178 2178 maxrev = n;
2179 2179 }
2180 2180
2181 2181 depth = calloc(sizeof(*depth), maxrev + 1);
2182 2182 if (depth == NULL)
2183 2183 return PyErr_NoMemory();
2184 2184
2185 2185 seen = calloc(sizeof(*seen), maxrev + 1);
2186 2186 if (seen == NULL) {
2187 2187 PyErr_NoMemory();
2188 2188 goto bail;
2189 2189 }
2190 2190
2191 2191 interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
2192 2192 if (interesting == NULL) {
2193 2193 PyErr_NoMemory();
2194 2194 goto bail;
2195 2195 }
2196 2196
2197 2197 if (PyList_Sort(revs) == -1)
2198 2198 goto bail;
2199 2199
2200 2200 for (i = 0; i < revcount; i++) {
2201 2201 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
2202 2202 long b = 1l << i;
2203 2203 depth[n] = 1;
2204 2204 seen[n] = b;
2205 2205 interesting[b] = 1;
2206 2206 }
2207 2207
2208 2208 /* invariant: ninteresting is the number of non-zero entries in
2209 2209 * interesting. */
2210 2210 ninteresting = (int)revcount;
2211 2211
2212 2212 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
2213 2213 int dv = depth[v];
2214 2214 int parents[2];
2215 2215 long sv;
2216 2216
2217 2217 if (dv == 0)
2218 2218 continue;
2219 2219
2220 2220 sv = seen[v];
2221 2221 if (index_get_parents(self, v, parents, maxrev) < 0)
2222 2222 goto bail;
2223 2223
2224 2224 for (i = 0; i < 2; i++) {
2225 2225 int p = parents[i];
2226 2226 long sp;
2227 2227 int dp;
2228 2228
2229 2229 if (p == -1)
2230 2230 continue;
2231 2231
2232 2232 dp = depth[p];
2233 2233 sp = seen[p];
2234 2234 if (dp <= dv) {
2235 2235 depth[p] = dv + 1;
2236 2236 if (sp != sv) {
2237 2237 interesting[sv] += 1;
2238 2238 seen[p] = sv;
2239 2239 if (sp) {
2240 2240 interesting[sp] -= 1;
2241 2241 if (interesting[sp] == 0)
2242 2242 ninteresting -= 1;
2243 2243 }
2244 2244 }
2245 2245 } else if (dv == dp - 1) {
2246 2246 long nsp = sp | sv;
2247 2247 if (nsp == sp)
2248 2248 continue;
2249 2249 seen[p] = nsp;
2250 2250 interesting[sp] -= 1;
2251 2251 if (interesting[sp] == 0)
2252 2252 ninteresting -= 1;
2253 2253 if (interesting[nsp] == 0)
2254 2254 ninteresting += 1;
2255 2255 interesting[nsp] += 1;
2256 2256 }
2257 2257 }
2258 2258 interesting[sv] -= 1;
2259 2259 if (interesting[sv] == 0)
2260 2260 ninteresting -= 1;
2261 2261 }
2262 2262
2263 2263 final = 0;
2264 2264 j = ninteresting;
2265 2265 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
2266 2266 if (interesting[i] == 0)
2267 2267 continue;
2268 2268 final |= i;
2269 2269 j -= 1;
2270 2270 }
2271 2271 if (final == 0) {
2272 2272 keys = PyList_New(0);
2273 2273 goto bail;
2274 2274 }
2275 2275
2276 2276 dict = PyDict_New();
2277 2277 if (dict == NULL)
2278 2278 goto bail;
2279 2279
2280 2280 for (i = 0; i < revcount; i++) {
2281 2281 PyObject *key;
2282 2282
2283 2283 if ((final & (1 << i)) == 0)
2284 2284 continue;
2285 2285
2286 2286 key = PyList_GET_ITEM(revs, i);
2287 2287 Py_INCREF(key);
2288 2288 Py_INCREF(Py_None);
2289 2289 if (PyDict_SetItem(dict, key, Py_None) == -1) {
2290 2290 Py_DECREF(key);
2291 2291 Py_DECREF(Py_None);
2292 2292 goto bail;
2293 2293 }
2294 2294 }
2295 2295
2296 2296 keys = PyDict_Keys(dict);
2297 2297
2298 2298 bail:
2299 2299 free(depth);
2300 2300 free(seen);
2301 2301 free(interesting);
2302 2302 Py_XDECREF(dict);
2303 2303
2304 2304 return keys;
2305 2305 }
2306 2306
2307 2307 /*
2308 2308 * Given a (possibly overlapping) set of revs, return all the
2309 2309 * common ancestors heads: heads(::args[0] and ::a[1] and ...)
2310 2310 */
2311 2311 static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
2312 2312 {
2313 2313 PyObject *ret = NULL;
2314 2314 Py_ssize_t argcount, i, len;
2315 2315 bitmask repeat = 0;
2316 2316 int revcount = 0;
2317 2317 int *revs;
2318 2318
2319 2319 argcount = PySequence_Length(args);
2320 2320 revs = PyMem_Malloc(argcount * sizeof(*revs));
2321 2321 if (argcount > 0 && revs == NULL)
2322 2322 return PyErr_NoMemory();
2323 2323 len = index_length(self);
2324 2324
2325 2325 for (i = 0; i < argcount; i++) {
2326 2326 static const int capacity = 24;
2327 2327 PyObject *obj = PySequence_GetItem(args, i);
2328 2328 bitmask x;
2329 2329 long val;
2330 2330
2331 2331 if (!PyInt_Check(obj)) {
2332 2332 PyErr_SetString(PyExc_TypeError,
2333 2333 "arguments must all be ints");
2334 2334 Py_DECREF(obj);
2335 2335 goto bail;
2336 2336 }
2337 2337 val = PyInt_AsLong(obj);
2338 2338 Py_DECREF(obj);
2339 2339 if (val == -1) {
2340 2340 ret = PyList_New(0);
2341 2341 goto done;
2342 2342 }
2343 2343 if (val < 0 || val >= len) {
2344 2344 PyErr_SetString(PyExc_IndexError, "index out of range");
2345 2345 goto bail;
2346 2346 }
2347 2347 /* this cheesy bloom filter lets us avoid some more
2348 2348 * expensive duplicate checks in the common set-is-disjoint
2349 2349 * case */
2350 2350 x = 1ull << (val & 0x3f);
2351 2351 if (repeat & x) {
2352 2352 int k;
2353 2353 for (k = 0; k < revcount; k++) {
2354 2354 if (val == revs[k])
2355 2355 goto duplicate;
2356 2356 }
2357 2357 } else
2358 2358 repeat |= x;
2359 2359 if (revcount >= capacity) {
2360 2360 PyErr_Format(PyExc_OverflowError,
2361 2361 "bitset size (%d) > capacity (%d)",
2362 2362 revcount, capacity);
2363 2363 goto bail;
2364 2364 }
2365 2365 revs[revcount++] = (int)val;
2366 2366 duplicate:;
2367 2367 }
2368 2368
2369 2369 if (revcount == 0) {
2370 2370 ret = PyList_New(0);
2371 2371 goto done;
2372 2372 }
2373 2373 if (revcount == 1) {
2374 2374 PyObject *obj;
2375 2375 ret = PyList_New(1);
2376 2376 if (ret == NULL)
2377 2377 goto bail;
2378 2378 obj = PyInt_FromLong(revs[0]);
2379 2379 if (obj == NULL)
2380 2380 goto bail;
2381 2381 PyList_SET_ITEM(ret, 0, obj);
2382 2382 goto done;
2383 2383 }
2384 2384
2385 2385 ret = find_gca_candidates(self, revs, revcount);
2386 2386 if (ret == NULL)
2387 2387 goto bail;
2388 2388
2389 2389 done:
2390 2390 PyMem_Free(revs);
2391 2391 return ret;
2392 2392
2393 2393 bail:
2394 2394 PyMem_Free(revs);
2395 2395 Py_XDECREF(ret);
2396 2396 return NULL;
2397 2397 }
2398 2398
2399 2399 /*
2400 2400 * Given a (possibly overlapping) set of revs, return the greatest
2401 2401 * common ancestors: those with the longest path to the root.
2402 2402 */
2403 2403 static PyObject *index_ancestors(indexObject *self, PyObject *args)
2404 2404 {
2405 2405 PyObject *ret;
2406 2406 PyObject *gca = index_commonancestorsheads(self, args);
2407 2407 if (gca == NULL)
2408 2408 return NULL;
2409 2409
2410 2410 if (PyList_GET_SIZE(gca) <= 1) {
2411 2411 return gca;
2412 2412 }
2413 2413
2414 2414 ret = find_deepest(self, gca);
2415 2415 Py_DECREF(gca);
2416 2416 return ret;
2417 2417 }
2418 2418
2419 2419 /*
2420 2420 * Invalidate any trie entries introduced by added revs.
2421 2421 */
static void index_invalidate_added(indexObject *self, Py_ssize_t start)
{
	/* Invalidate trie entries introduced by appended (in-memory)
	 * revs >= start, and shrink the appended region accordingly. */
	Py_ssize_t i, len;

	/* nothing to do unless `start` falls inside the appended part */
	len = self->length + self->new_length;
	i = start - self->length;
	if (i < 0)
		return;

	/* the 20-byte node hash lives at offset 32 of each serialized
	 * entry returned by index_deref() */
	for (i = start; i < len; i++)
		nt_delete_node(&self->nt, index_deref(self, i) + 32);

	self->new_length = start - self->length;
}
2436 2436
2437 2437 /*
2438 2438 * Delete a numeric range of revs, which must be at the end of the
2439 2439 * range.
2440 2440 */
/*
 * Implements `del index[i:j]`. Only suffix deletion (step 1, ending
 * at the current end of the index) is supported; anything else
 * raises. Returns 0 on success, -1 with an exception set on error.
 */
static int index_slice_del(indexObject *self, PyObject *item)
{
	Py_ssize_t start, stop, step, slicelength;
	Py_ssize_t length = index_length(self) + 1;
	int ret = 0;

	/* Argument changed from PySliceObject* to PyObject* in Python 3. */
#ifdef IS_PY3K
	if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
	                         &slicelength) < 0)
#else
	if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
	                         &step, &slicelength) < 0)
#endif
		return -1;

	if (slicelength <= 0)
		return 0;

	if ((step < 0 && start < stop) || (step > 0 && start > stop))
		stop = start;

	/* normalize a negative step into an equivalent forward range */
	if (step < 0) {
		stop = start + 1;
		start = stop + step * (slicelength - 1) - 1;
		step = -step;
	}

	if (step != 1) {
		PyErr_SetString(PyExc_ValueError,
		                "revlog index delete requires step size of 1");
		return -1;
	}

	if (stop != length - 1) {
		PyErr_SetString(PyExc_IndexError,
		                "revlog index deletion indices are invalid");
		return -1;
	}

	if (start < self->length) {
		/* truncation reaches into the on-disk portion */
		if (self->ntinitialized) {
			Py_ssize_t i;

			/* purge every truncated on-disk rev from the
			 * node->rev trie */
			for (i = start; i < self->length; i++) {
				const char *node = index_node_existing(self, i);
				if (node == NULL)
					return -1;

				nt_delete_node(&self->nt, node);
			}
			/* appended revs are all dropped as well */
			if (self->new_length)
				index_invalidate_added(self, self->length);
			if (self->ntrev > start)
				self->ntrev = (int)start;
		} else if (self->new_length) {
			self->new_length = 0;
		}

		self->length = start;
		goto done;
	}

	/* truncation only affects revs appended in memory */
	if (self->ntinitialized) {
		index_invalidate_added(self, start);
		if (self->ntrev > start)
			self->ntrev = (int)start;
	} else {
		self->new_length = start - self->length;
	}
done:
	/* cached head revisions are stale after any truncation */
	Py_CLEAR(self->headrevs);
	return ret;
}
2515 2515
2516 2516 /*
2517 2517 * Supported ops:
2518 2518 *
2519 2519 * slice deletion
2520 2520 * string assignment (extend node->rev mapping)
2521 2521 * string deletion (shrink node->rev mapping)
2522 2522 */
2523 2523 static int index_assign_subscript(indexObject *self, PyObject *item,
2524 2524 PyObject *value)
2525 2525 {
2526 2526 char *node;
2527 2527 long rev;
2528 2528
2529 2529 if (PySlice_Check(item) && value == NULL)
2530 2530 return index_slice_del(self, item);
2531 2531
2532 2532 if (node_check(item, &node) == -1)
2533 2533 return -1;
2534 2534
2535 2535 if (value == NULL)
2536 2536 return self->ntinitialized ? nt_delete_node(&self->nt, node)
2537 2537 : 0;
2538 2538 rev = PyInt_AsLong(value);
2539 2539 if (rev > INT_MAX || rev < 0) {
2540 2540 if (!PyErr_Occurred())
2541 2541 PyErr_SetString(PyExc_ValueError, "rev out of range");
2542 2542 return -1;
2543 2543 }
2544 2544
2545 2545 if (index_init_nt(self) == -1)
2546 2546 return -1;
2547 2547 return nt_insert(&self->nt, node, (int)rev);
2548 2548 }
2549 2549
2550 2550 /*
2551 2551 * Find all RevlogNG entries in an index that has inline data. Update
2552 2552 * the optional "offsets" table with those entries.
2553 2553 */
2554 2554 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
2555 2555 {
2556 2556 const char *data = (const char *)self->buf.buf;
2557 2557 Py_ssize_t pos = 0;
2558 2558 Py_ssize_t end = self->buf.len;
2559 2559 long incr = v1_hdrsize;
2560 2560 Py_ssize_t len = 0;
2561 2561
2562 2562 while (pos + v1_hdrsize <= end && pos >= 0) {
2563 2563 uint32_t comp_len;
2564 2564 /* 3rd element of header is length of compressed inline data */
2565 2565 comp_len = getbe32(data + pos + 8);
2566 2566 incr = v1_hdrsize + comp_len;
2567 2567 if (offsets)
2568 2568 offsets[len] = data + pos;
2569 2569 len++;
2570 2570 pos += incr;
2571 2571 }
2572 2572
2573 2573 if (pos != end) {
2574 2574 if (!PyErr_Occurred())
2575 2575 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2576 2576 return -1;
2577 2577 }
2578 2578
2579 2579 return len;
2580 2580 }
2581 2581
/*
 * tp_init: parse (data, inlined) and set the index up over the given
 * buffer. Returns 0 on success, -1 with an exception set on failure.
 */
static int index_init(indexObject *self, PyObject *args)
{
	PyObject *data_obj, *inlined_obj;
	Py_ssize_t size;

	/* Initialize before argument-checking to avoid index_dealloc() crash.
	 */
	self->added = NULL;
	self->new_length = 0;
	self->added_length = 0;
	self->data = NULL;
	memset(&self->buf, 0, sizeof(self->buf));
	self->headrevs = NULL;
	self->filteredrevs = Py_None;
	Py_INCREF(Py_None);
	self->ntinitialized = 0;
	self->offsets = NULL;

	if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
		return -1;
	if (!PyObject_CheckBuffer(data_obj)) {
		PyErr_SetString(PyExc_TypeError,
		                "data does not support buffer interface");
		return -1;
	}

	/* hold a buffer view for the whole lifetime of the index;
	 * released in index_dealloc() */
	if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
		return -1;
	size = self->buf.len;

	self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
	self->data = data_obj;

	self->ntlookups = self->ntmisses = 0;
	self->ntrev = -1;
	Py_INCREF(self->data);

	if (self->inlined) {
		/* inline revlog: entries are variable-size (fixed header
		 * plus compressed data), so count them with a scan */
		Py_ssize_t len = inline_scan(self, NULL);
		if (len == -1)
			goto bail;
		self->length = len;
	} else {
		/* separate data file: the index is fixed-size records */
		if (size % v1_hdrsize) {
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
			goto bail;
		}
		self->length = size / v1_hdrsize;
	}

	return 0;
bail:
	/* partially-acquired resources are released by index_dealloc() */
	return -1;
}
2636 2636
2637 2637 static PyObject *index_nodemap(indexObject *self)
2638 2638 {
2639 2639 Py_INCREF(self);
2640 2640 return (PyObject *)self;
2641 2641 }
2642 2642
2643 2643 static void _index_clearcaches(indexObject *self)
2644 2644 {
2645 2645 if (self->offsets) {
2646 2646 PyMem_Free((void *)self->offsets);
2647 2647 self->offsets = NULL;
2648 2648 }
2649 2649 if (self->ntinitialized) {
2650 2650 nt_dealloc(&self->nt);
2651 2651 }
2652 2652 self->ntinitialized = 0;
2653 2653 Py_CLEAR(self->headrevs);
2654 2654 }
2655 2655
2656 2656 static PyObject *index_clearcaches(indexObject *self)
2657 2657 {
2658 2658 _index_clearcaches(self);
2659 2659 self->ntrev = -1;
2660 2660 self->ntlookups = self->ntmisses = 0;
2661 2661 Py_RETURN_NONE;
2662 2662 }
2663 2663
/*
 * tp_dealloc: drop caches, release the buffer view before decrefing
 * the object that provides it, then free owned references and memory.
 */
static void index_dealloc(indexObject *self)
{
	_index_clearcaches(self);
	Py_XDECREF(self->filteredrevs);
	if (self->buf.buf) {
		/* must happen before Py_XDECREF(self->data), which owns
		 * the memory the view points into */
		PyBuffer_Release(&self->buf);
		memset(&self->buf, 0, sizeof(self->buf));
	}
	Py_XDECREF(self->data);
	PyMem_Free(self->added);
	PyObject_Del(self);
}
2676 2676
/* Sequence protocol: len(index), index[i] and `node in index`. */
static PySequenceMethods index_sequence_methods = {
    (lenfunc)index_length,      /* sq_length */
    0,                          /* sq_concat */
    0,                          /* sq_repeat */
    (ssizeargfunc)index_get,    /* sq_item */
    0,                          /* sq_slice */
    0,                          /* sq_ass_item */
    0,                          /* sq_ass_slice */
    (objobjproc)index_contains, /* sq_contains */
};
2687 2687
/* Mapping protocol: index[key], index[key] = rev, del index[...]. */
static PyMappingMethods index_mapping_methods = {
    (lenfunc)index_length,                 /* mp_length */
    (binaryfunc)index_getitem,             /* mp_subscript */
    (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
};
2693 2693
/* Methods exposed on parsers.index instances. */
static PyMethodDef index_methods[] = {
    {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
     "return the gca set of the given revs"},
    {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
     METH_VARARGS,
     "return the heads of the common ancestors of the given revs"},
    {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
     "clear the index caches"},
    {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
    {"get_rev", (PyCFunction)index_m_get, METH_VARARGS,
     "return `rev` associated with a node or None"},
    {"has_node", (PyCFunction)index_m_has_node, METH_O,
     "return True if the node exist in the index"},
    {"rev", (PyCFunction)index_m_rev, METH_O,
     "return `rev` associated with a node or raise RevlogError"},
    {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
     "compute phases"},
    {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
     "reachableroots"},
    {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
     "get head revisions"}, /* Can do filtering since 3.2 */
    {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
     "get filtered head revisions"}, /* Can always do filtering */
    {"issnapshot", (PyCFunction)index_issnapshot, METH_O,
     "True if the object is a snapshot"},
    {"findsnapshots", (PyCFunction)index_findsnapshots, METH_VARARGS,
     "Gather snapshot data in a cache dict"},
    {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
     "determine revisions with deltas to reconstruct fulltext"},
    {"slicechunktodensity", (PyCFunction)index_slicechunktodensity,
     METH_VARARGS, "determine revisions with deltas to reconstruct fulltext"},
    {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
    {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
     "match a potentially ambiguous node ID"},
    {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
     "find length of shortest hex nodeid of a binary ID"},
    {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
    {NULL} /* Sentinel */
};
2733 2733
/* Attribute descriptors: `index.nodemap` resolves to the index itself. */
static PyGetSetDef index_getset[] = {
    {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
    {NULL} /* Sentinel */
};
2738 2738
/* Type object for parsers.index; registered in revlog_module_init(). */
PyTypeObject HgRevlogIndex_Type = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.index",               /* tp_name */
    sizeof(indexObject),           /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)index_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    &index_sequence_methods,       /* tp_as_sequence */
    &index_mapping_methods,        /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "revlog index",                /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    index_methods,                 /* tp_methods */
    0,                             /* tp_members */
    index_getset,                  /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)index_init,          /* tp_init */
    0,                             /* tp_alloc */
};
2778 2778
2779 2779 /*
2780 2780 * returns a tuple of the form (index, index, cache) with elements as
2781 2781 * follows:
2782 2782 *
2783 2783 * index: an index object that lazily parses RevlogNG records
2784 2784 * cache: if data is inlined, a tuple (0, index_file_content), else None
2785 2785 * index_file_content could be a string, or a buffer
2786 2786 *
2787 2787 * added complications are for backwards compatibility
2788 2788 */
PyObject *parse_index2(PyObject *self, PyObject *args)
{
	PyObject *cache = NULL;
	indexObject *idx;
	int ret;

	idx = PyObject_New(indexObject, &HgRevlogIndex_Type);
	if (idx == NULL)
		goto bail;

	ret = index_init(idx, args);
	if (ret == -1)
		goto bail;

	if (idx->inlined) {
		/* inline revlog: return (0, data) so the caller can reuse
		 * the already-read file content as a chunk cache */
		cache = Py_BuildValue("iO", 0, idx->data);
		if (cache == NULL)
			goto bail;
	} else {
		cache = Py_None;
		Py_INCREF(cache);
	}

	/* "N" steals a reference, so success transfers ownership of
	 * both idx and cache to the result tuple */
	return Py_BuildValue("NN", idx, cache);

bail:
	Py_XDECREF(idx);
	Py_XDECREF(cache);
	return NULL;
}
2819 2819
/* C API table exported to other extension modules through the
 * "mercurial.cext.parsers.revlog_CAPI" capsule registered in
 * revlog_module_init(). */
static Revlog_CAPI CAPI = {
	/* increment the abi_version field upon each change in the Revlog_CAPI
	   struct or in the ABI of the listed functions */
	2,
	index_length,
	index_node,
	HgRevlogIndex_GetParents,
};
2828 2828
/*
 * Module initialization: register the index and nodetree types, build
 * the shared nullid entry tuple, and export the C API capsule.
 */
void revlog_module_init(PyObject *mod)
{
	PyObject *caps = NULL;
	HgRevlogIndex_Type.tp_new = PyType_GenericNew;
	if (PyType_Ready(&HgRevlogIndex_Type) < 0)
		return;
	Py_INCREF(&HgRevlogIndex_Type);
	PyModule_AddObject(mod, "index", (PyObject *)&HgRevlogIndex_Type);

	nodetreeType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&nodetreeType) < 0)
		return;
	Py_INCREF(&nodetreeType);
	PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);

	if (!nullentry) {
		/* the index tuple for the null revision: zero offsets,
		 * parents/linkrev of -1, 20-byte nullid */
		nullentry =
		    Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0, -1,
		                  -1, -1, -1, nullid, (Py_ssize_t)20);
	}
	if (nullentry)
		/* this tuple lives forever; keep the GC away from it */
		PyObject_GC_UnTrack(nullentry);

	caps = PyCapsule_New(&CAPI, "mercurial.cext.parsers.revlog_CAPI", NULL);
	if (caps != NULL)
		PyModule_AddObject(mod, "revlog_CAPI", caps);
}
General Comments 0
You need to be logged in to leave comments. Login now