##// END OF EJS Templates
cext: shut-up sign compare warnings...
Joerg Sonnenberger -
r46760:e4f6dae0 default
parent child Browse files
Show More
@@ -1,2866 +1,2866 b''
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #define PY_SSIZE_T_CLEAN
11 11 #include <Python.h>
12 12 #include <assert.h>
13 13 #include <ctype.h>
14 14 #include <limits.h>
15 15 #include <stddef.h>
16 16 #include <stdlib.h>
17 17 #include <string.h>
18 18
19 19 #include "bitmanipulation.h"
20 20 #include "charencode.h"
21 21 #include "revlog.h"
22 22 #include "util.h"
23 23
24 24 #ifdef IS_PY3K
25 25 /* The mapping of Python types is meant to be temporary to get Python
26 26 * 3 to compile. We should remove this once Python 3 support is fully
27 27 * supported and proper types are used in the extensions themselves. */
28 28 #define PyInt_Check PyLong_Check
29 29 #define PyInt_FromLong PyLong_FromLong
30 30 #define PyInt_FromSsize_t PyLong_FromSsize_t
31 31 #define PyInt_AsLong PyLong_AsLong
32 32 #endif
33 33
/* Forward declaration; the full definition follows below. */
typedef struct indexObjectStruct indexObject;

/* One node of the base-16 trie: one child slot per hex nibble value. */
typedef struct {
	int children[16];
} nodetreenode;

/* C-level API structure handed to other extension modules (abi_version
 * guards against mismatched consumers). */
typedef struct {
	int abi_version;
	Py_ssize_t (*index_length)(const indexObject *);
	const char *(*index_node)(indexObject *, Py_ssize_t);
	int (*index_parents)(PyObject *, int, int *);
} Revlog_CAPI;

/*
 * A base-16 trie for fast node->rev mapping.
 *
 * Positive value is index of the next node in the trie
 * Negative value is a leaf: -(rev + 2)
 * Zero is empty
 */
typedef struct {
	indexObject *index;
	nodetreenode *nodes;
	Py_ssize_t nodelen;
	unsigned length;   /* # nodes in use */
	unsigned capacity; /* # nodes allocated */
	int depth;         /* maximum depth of tree */
	int splits;        /* # splits performed */
} nodetree;

/* Python object wrapping a standalone nodetree. */
typedef struct {
	PyObject_HEAD /* ; */
	nodetree nt;
} nodetreeObject;

/*
 * This class has two behaviors.
 *
 * When used in a list-like way (with integer keys), we decode an
 * entry in a RevlogNG index file on demand. We have limited support for
 * integer-keyed insert and delete, only at elements right before the
 * end.
 *
 * With string keys, we lazily perform a reverse mapping from node to
 * rev, using a base-16 trie.
 */
struct indexObjectStruct {
	PyObject_HEAD
	/* Type-specific fields go here. */
	PyObject *data;     /* raw bytes of index */
	Py_ssize_t nodelen; /* digest size of the hash, 20 for SHA-1 */
	PyObject *nullentry; /* fast path for references to null */
	Py_buffer buf;       /* buffer of data */
	const char **offsets; /* populated on demand */
	Py_ssize_t length;    /* current on-disk number of elements */
	unsigned new_length;  /* number of added elements */
	unsigned added_length; /* space reserved for added elements */
	char *added;           /* populated on demand */
	PyObject *headrevs;    /* cache, invalidated on changes */
	PyObject *filteredrevs; /* filtered revs set */
	nodetree nt;            /* base-16 trie */
	int ntinitialized;      /* 0 or 1 */
	int ntrev;              /* last rev scanned */
	int ntlookups;          /* # lookups */
	int ntmisses;           /* # lookups that miss the cache */
	int inlined;            /* non-zero if data is an inline (.i) revlog */
};
101 101
102 102 static Py_ssize_t index_length(const indexObject *self)
103 103 {
104 104 return self->length + self->new_length;
105 105 }
106 106
107 107 static const char nullid[32] = {0};
108 108 static const Py_ssize_t nullrev = -1;
109 109
110 110 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
111 111
112 112 static int index_find_node(indexObject *self, const char *node);
113 113
114 114 #if LONG_MAX == 0x7fffffffL
115 115 static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
116 116 #else
117 117 static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
118 118 #endif
119 119
120 120 /* A RevlogNG v1 index entry is 64 bytes long. */
121 121 static const long v1_hdrsize = 64;
122 122
123 123 static void raise_revlog_error(void)
124 124 {
125 125 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
126 126
127 127 mod = PyImport_ImportModule("mercurial.error");
128 128 if (mod == NULL) {
129 129 goto cleanup;
130 130 }
131 131
132 132 dict = PyModule_GetDict(mod);
133 133 if (dict == NULL) {
134 134 goto cleanup;
135 135 }
136 136 Py_INCREF(dict);
137 137
138 138 errclass = PyDict_GetItemString(dict, "RevlogError");
139 139 if (errclass == NULL) {
140 140 PyErr_SetString(PyExc_SystemError,
141 141 "could not find RevlogError");
142 142 goto cleanup;
143 143 }
144 144
145 145 /* value of exception is ignored by callers */
146 146 PyErr_SetString(errclass, "RevlogError");
147 147
148 148 cleanup:
149 149 Py_XDECREF(dict);
150 150 Py_XDECREF(mod);
151 151 }
152 152
/*
 * Return a pointer to the beginning of a RevlogNG record.
 *
 * Returns NULL (with a Python exception set) on failure. The pointer
 * aliases internal storage and must not be freed by the caller.
 */
static const char *index_deref(indexObject *self, Py_ssize_t pos)
{
	/* Revisions appended since load live in the self->added arena. */
	if (pos >= self->length)
		return self->added + (pos - self->length) * v1_hdrsize;

	/* Inline revlogs interleave data with index entries, so records
	 * are not at fixed offsets; build the offsets table lazily by
	 * scanning the whole buffer once. */
	if (self->inlined && pos > 0) {
		if (self->offsets == NULL) {
			Py_ssize_t ret;
			self->offsets =
			    PyMem_Malloc(self->length * sizeof(*self->offsets));
			if (self->offsets == NULL)
				return (const char *)PyErr_NoMemory();
			ret = inline_scan(self, self->offsets);
			if (ret == -1) {
				return NULL;
			};
		}
		return self->offsets[pos];
	}

	/* Non-inline index: fixed-size 64-byte records. */
	return (const char *)(self->buf.buf) + pos * v1_hdrsize;
}
178 178
/*
 * Get parents of the given rev.
 *
 * The specified rev must be valid and must not be nullrev. A returned
 * parent revision may be nullrev, but is guaranteed to be in valid range.
 */
static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
                                    int maxrev)
{
	const char *data = index_deref(self, rev);

	/* parent revisions are stored at byte offsets 24 and 28 of the
	 * v1 record (see the RevlogNG format comment below) */
	ps[0] = getbe32(data + 24);
	ps[1] = getbe32(data + 28);

	/* If index file is corrupted, ps[] may point to invalid revisions. So
	 * there is a risk of buffer overflow to trust them unconditionally. */
	if (ps[0] < -1 || ps[0] > maxrev || ps[1] < -1 || ps[1] > maxrev) {
		PyErr_SetString(PyExc_ValueError, "parent out of range");
		return -1;
	}
	return 0;
}
201 201
/*
 * Get parents of the given rev.
 *
 * If the specified rev is out of range, IndexError will be raised. If the
 * revlog entry is corrupted, ValueError may be raised.
 *
 * Returns 0 on success or -1 on failure.
 *
 * This is the range-checked entry point exported through Revlog_CAPI
 * for use by other extension modules.
 */
static int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps)
{
	int tiprev;
	if (!op || !HgRevlogIndex_Check(op) || !ps) {
		PyErr_BadInternalCall();
		return -1;
	}
	tiprev = (int)index_length((indexObject *)op) - 1;
	if (rev < -1 || rev > tiprev) {
		PyErr_Format(PyExc_IndexError, "rev out of range: %d", rev);
		return -1;
	} else if (rev == -1) {
		/* nullrev has no parents by definition */
		ps[0] = ps[1] = -1;
		return 0;
	} else {
		return index_get_parents((indexObject *)op, rev, ps, tiprev);
	}
}
228 228
/*
 * Return the data-file start offset of the given rev.
 *
 * The on-disk field packs a 48-bit offset and 16-bit flags into 6+2
 * bytes; the flags are shifted out before returning.
 */
static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
{
	const char *data;
	uint64_t offset;

	if (rev == nullrev)
		return 0;

	data = index_deref(self, rev);
	offset = getbe32(data + 4);
	if (rev == 0) {
		/* mask out version number for the first entry */
		offset &= 0xFFFF;
	} else {
		uint32_t offset_high = getbe32(data);
		offset |= ((uint64_t)offset_high) << 32;
	}
	/* drop the 16 flag bits to recover the byte offset */
	return (int64_t)(offset >> 16);
}
248 248
/*
 * Return the compressed length of the given rev's data, or -1 (with
 * OverflowError set) if the stored value does not fit in an int.
 */
static inline int index_get_length(indexObject *self, Py_ssize_t rev)
{
	const char *data;
	int tmp;

	if (rev == nullrev)
		return 0;

	data = index_deref(self, rev);

	/* compressed length is the big-endian 32-bit field at offset 8 */
	tmp = (int)getbe32(data + 8);
	if (tmp < 0) {
		PyErr_Format(PyExc_OverflowError,
		             "revlog entry size out of bound (%d)", tmp);
		return -1;
	}
	return tmp;
}
267 267
/*
 * RevlogNG format (all in big endian, data may be inlined):
 * 6 bytes: offset
 * 2 bytes: flags
 * 4 bytes: compressed length
 * 4 bytes: uncompressed length
 * 4 bytes: base revision
 * 4 bytes: link revision
 * 4 bytes: parent 1 revision
 * 4 bytes: parent 2 revision
 * 32 bytes: nodeid (only 20 bytes used with SHA-1)
 */
/* Decode the record at ``pos`` into the classic 8-tuple exposed to
 * Python, or return the cached nullentry for nullrev. Returns NULL
 * with IndexError set for out-of-range positions. */
static PyObject *index_get(indexObject *self, Py_ssize_t pos)
{
	uint64_t offset_flags;
	int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
	const char *c_node_id;
	const char *data;
	Py_ssize_t length = index_length(self);

	if (pos == nullrev) {
		Py_INCREF(self->nullentry);
		return self->nullentry;
	}

	if (pos < 0 || pos >= length) {
		PyErr_SetString(PyExc_IndexError, "revlog index out of range");
		return NULL;
	}

	data = index_deref(self, pos);
	if (data == NULL)
		return NULL;

	offset_flags = getbe32(data + 4);
	/*
	 * The first entry on-disk needs the version number masked out,
	 * but this doesn't apply if entries are added to an empty index.
	 */
	if (self->length && pos == 0)
		offset_flags &= 0xFFFF;
	else {
		uint32_t offset_high = getbe32(data);
		offset_flags |= ((uint64_t)offset_high) << 32;
	}

	comp_len = getbe32(data + 8);
	uncomp_len = getbe32(data + 12);
	base_rev = getbe32(data + 16);
	link_rev = getbe32(data + 20);
	parent_1 = getbe32(data + 24);
	parent_2 = getbe32(data + 28);
	c_node_id = data + 32;

	return Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
	                     base_rev, link_rev, parent_1, parent_2, c_node_id,
	                     self->nodelen);
}
326 326
327 327 /*
328 328 * Return the hash of node corresponding to the given rev.
329 329 */
330 330 static const char *index_node(indexObject *self, Py_ssize_t pos)
331 331 {
332 332 Py_ssize_t length = index_length(self);
333 333 const char *data;
334 334
335 335 if (pos == nullrev)
336 336 return nullid;
337 337
338 338 if (pos >= length)
339 339 return NULL;
340 340
341 341 data = index_deref(self, pos);
342 342 return data ? data + 32 : NULL;
343 343 }
344 344
345 345 /*
346 346 * Return the hash of the node corresponding to the given rev. The
347 347 * rev is assumed to be existing. If not, an exception is set.
348 348 */
349 349 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
350 350 {
351 351 const char *node = index_node(self, pos);
352 352 if (node == NULL) {
353 353 PyErr_Format(PyExc_IndexError, "could not access rev %d",
354 354 (int)pos);
355 355 }
356 356 return node;
357 357 }
358 358
359 359 static int nt_insert(nodetree *self, const char *node, int rev);
360 360
/*
 * Extract the raw bytes of a node from a Python bytes object into
 * *node, validating its length against ``nodelen``.
 *
 * Returns 0 on success, -1 (with an exception set) on type or length
 * mismatch. *node borrows storage from ``obj``.
 */
static int node_check(Py_ssize_t nodelen, PyObject *obj, char **node)
{
	Py_ssize_t thisnodelen;
	if (PyBytes_AsStringAndSize(obj, node, &thisnodelen) == -1)
		return -1;
	if (nodelen == thisnodelen)
		return 0;
	PyErr_Format(PyExc_ValueError, "node len %zd != expected node len %zd",
	             thisnodelen, nodelen);
	return -1;
}
372 372
/*
 * Append one revision entry (an 8-tuple matching tuple_format) to the
 * in-memory portion of the index.
 *
 * Grows the ``added`` arena geometrically (doubling, starting at 4096
 * entries), serializes the tuple back into the on-disk v1 layout, and
 * keeps the node trie and headrevs cache coherent.
 */
static PyObject *index_append(indexObject *self, PyObject *obj)
{
	uint64_t offset_flags;
	int rev, comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
	Py_ssize_t c_node_id_len;
	const char *c_node_id;
	char *data;

	if (!PyArg_ParseTuple(obj, tuple_format, &offset_flags, &comp_len,
	                      &uncomp_len, &base_rev, &link_rev, &parent_1,
	                      &parent_2, &c_node_id, &c_node_id_len)) {
		PyErr_SetString(PyExc_TypeError, "8-tuple required");
		return NULL;
	}
	if (c_node_id_len != self->nodelen) {
		PyErr_SetString(PyExc_TypeError, "invalid node");
		return NULL;
	}

	/* grow the arena if it is full */
	if (self->new_length == self->added_length) {
		size_t new_added_length =
		    self->added_length ? self->added_length * 2 : 4096;
		void *new_added =
		    PyMem_Realloc(self->added, new_added_length * v1_hdrsize);
		if (!new_added)
			return PyErr_NoMemory();
		self->added = new_added;
		self->added_length = new_added_length;
	}
	rev = self->length + self->new_length;
	data = self->added + v1_hdrsize * self->new_length++;
	/* re-encode the tuple into the big-endian on-disk record layout */
	putbe32(offset_flags >> 32, data);
	putbe32(offset_flags & 0xffffffffU, data + 4);
	putbe32(comp_len, data + 8);
	putbe32(uncomp_len, data + 12);
	putbe32(base_rev, data + 16);
	putbe32(link_rev, data + 20);
	putbe32(parent_1, data + 24);
	putbe32(parent_2, data + 28);
	memcpy(data + 32, c_node_id, c_node_id_len);
	/* zero-pad the 32-byte node field beyond the digest */
	memset(data + 32 + c_node_id_len, 0, 32 - c_node_id_len);

	if (self->ntinitialized)
		nt_insert(&self->nt, c_node_id, rev);

	/* appended rev invalidates the cached head list */
	Py_CLEAR(self->headrevs);
	Py_RETURN_NONE;
}
421 421
/*
 * Build a dict of diagnostic counters (revs in memory, node trie
 * statistics, ...) for the ``stats()`` method.
 *
 * The istat macro inserts one self->field under a descriptive bytes
 * key, jumping to ``bail`` on any allocation or insertion failure.
 */
static PyObject *index_stats(indexObject *self)
{
	PyObject *obj = PyDict_New();
	PyObject *s = NULL;
	PyObject *t = NULL;

	if (obj == NULL)
		return NULL;

#define istat(__n, __d)                                                        \
	do {                                                                   \
		s = PyBytes_FromString(__d);                                   \
		t = PyInt_FromSsize_t(self->__n);                              \
		if (!s || !t)                                                  \
			goto bail;                                             \
		if (PyDict_SetItem(obj, s, t) == -1)                           \
			goto bail;                                             \
		Py_CLEAR(s);                                                   \
		Py_CLEAR(t);                                                   \
	} while (0)

	if (self->added_length)
		istat(new_length, "index entries added");
	istat(length, "revs in memory");
	istat(ntlookups, "node trie lookups");
	istat(ntmisses, "node trie misses");
	istat(ntrev, "node trie last rev scanned");
	if (self->ntinitialized) {
		istat(nt.capacity, "node trie capacity");
		istat(nt.depth, "node trie depth");
		istat(nt.length, "node trie count");
		istat(nt.splits, "node trie splits");
	}

#undef istat

	return obj;

bail:
	Py_XDECREF(obj);
	Py_XDECREF(s);
	Py_XDECREF(t);
	return NULL;
}
466 466
/*
 * When we cache a list, we want to be sure the caller can't mutate
 * the cached copy.
 *
 * Returns a new shallow copy of ``list`` (elements are shared and
 * incref'd), or NULL on allocation failure.
 */
static PyObject *list_copy(PyObject *list)
{
	Py_ssize_t len = PyList_GET_SIZE(list);
	PyObject *newlist = PyList_New(len);
	Py_ssize_t i;

	if (newlist == NULL)
		return NULL;

	for (i = 0; i < len; i++) {
		PyObject *obj = PyList_GET_ITEM(list, i);
		Py_INCREF(obj);
		/* PyList_SET_ITEM steals the reference taken above */
		PyList_SET_ITEM(newlist, i, obj);
	}

	return newlist;
}
488 488
489 489 static int check_filter(PyObject *filter, Py_ssize_t arg)
490 490 {
491 491 if (filter) {
492 492 PyObject *arglist, *result;
493 493 int isfiltered;
494 494
495 495 arglist = Py_BuildValue("(n)", arg);
496 496 if (!arglist) {
497 497 return -1;
498 498 }
499 499
500 500 result = PyEval_CallObject(filter, arglist);
501 501 Py_DECREF(arglist);
502 502 if (!result) {
503 503 return -1;
504 504 }
505 505
506 506 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
507 507 * same as this function, so we can just return it directly.*/
508 508 isfiltered = PyObject_IsTrue(result);
509 509 Py_DECREF(result);
510 510 return isfiltered;
511 511 } else {
512 512 return 0;
513 513 }
514 514 }
515 515
516 516 static inline void set_phase_from_parents(char *phases, int parent_1,
517 517 int parent_2, Py_ssize_t i)
518 518 {
519 519 if (parent_1 >= 0 && phases[parent_1] > phases[i])
520 520 phases[i] = phases[parent_1];
521 521 if (parent_2 >= 0 && phases[parent_2] > phases[i])
522 522 phases[i] = phases[parent_2];
523 523 }
524 524
/*
 * Compute the subset of ``roots`` reachable from ``heads`` by walking
 * parent links backwards, and (optionally, when ``includepath`` is
 * true) every revision lying on a path between a reachable root and a
 * head. Revisions below ``minroot`` are never traversed.
 *
 * Returns a new list of revision numbers, or NULL with an exception
 * set on failure.
 */
static PyObject *reachableroots2(indexObject *self, PyObject *args)
{

	/* Input */
	long minroot;
	PyObject *includepatharg = NULL;
	int includepath = 0;
	/* heads and roots are lists */
	PyObject *heads = NULL;
	PyObject *roots = NULL;
	PyObject *reachable = NULL;

	PyObject *val;
	Py_ssize_t len = index_length(self);
	long revnum;
	Py_ssize_t k;
	Py_ssize_t i;
	Py_ssize_t l;
	int r;
	int parents[2];

	/* Internal data structure:
	 * tovisit: array of length len+1 (all revs + nullrev), filled upto
	 * lentovisit
	 *
	 * revstates: array of length len+1 (all revs + nullrev) */
	int *tovisit = NULL;
	long lentovisit = 0;
	enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
	char *revstates = NULL;

	/* Get arguments */
	if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
	                      &PyList_Type, &roots, &PyBool_Type,
	                      &includepatharg))
		goto bail;

	if (includepatharg == Py_True)
		includepath = 1;

	/* Initialize return set */
	reachable = PyList_New(0);
	if (reachable == NULL)
		goto bail;

	/* Initialize internal datastructures */
	tovisit = (int *)malloc((len + 1) * sizeof(int));
	if (tovisit == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	revstates = (char *)calloc(len + 1, 1);
	if (revstates == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* Mark every root rev; revstates is indexed by rev+1 so nullrev
	 * (-1) maps to slot 0. */
	l = PyList_GET_SIZE(roots);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		/* If root is out of range, e.g. wdir(), it must be unreachable
		 * from heads. So we can just ignore it. */
		if (revnum + 1 < 0 || revnum + 1 >= len + 1)
			continue;
		revstates[revnum + 1] |= RS_ROOT;
	}

	/* Populate tovisit with all the heads */
	l = PyList_GET_SIZE(heads);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
			PyErr_SetString(PyExc_IndexError, "head out of range");
			goto bail;
		}
		if (!(revstates[revnum + 1] & RS_SEEN)) {
			tovisit[lentovisit++] = (int)revnum;
			revstates[revnum + 1] |= RS_SEEN;
		}
	}

	/* Visit the tovisit list and find the reachable roots */
	k = 0;
	while (k < lentovisit) {
		/* Add the node to reachable if it is a root*/
		revnum = tovisit[k++];
		if (revstates[revnum + 1] & RS_ROOT) {
			revstates[revnum + 1] |= RS_REACHABLE;
			val = PyInt_FromLong(revnum);
			if (val == NULL)
				goto bail;
			r = PyList_Append(reachable, val);
			Py_DECREF(val);
			if (r < 0)
				goto bail;
			/* without includepath there is no need to walk
			 * beyond a reached root */
			if (includepath == 0)
				continue;
		}

		/* Add its parents to the list of nodes to visit */
		if (revnum == nullrev)
			continue;
		r = index_get_parents(self, revnum, parents, (int)len - 1);
		if (r < 0)
			goto bail;
		for (i = 0; i < 2; i++) {
			if (!(revstates[parents[i] + 1] & RS_SEEN) &&
			    parents[i] >= minroot) {
				tovisit[lentovisit++] = parents[i];
				revstates[parents[i] + 1] |= RS_SEEN;
			}
		}
	}

	/* Find all the nodes in between the roots we found and the heads
	 * and add them to the reachable set */
	if (includepath == 1) {
		long minidx = minroot;
		if (minidx < 0)
			minidx = 0;
		/* forward sweep: a rev is reachable if it was seen and
		 * either parent is already reachable */
		for (i = minidx; i < len; i++) {
			if (!(revstates[i + 1] & RS_SEEN))
				continue;
			r = index_get_parents(self, i, parents, (int)len - 1);
			/* Corrupted index file, error is set from
			 * index_get_parents */
			if (r < 0)
				goto bail;
			if (((revstates[parents[0] + 1] |
			      revstates[parents[1] + 1]) &
			     RS_REACHABLE) &&
			    !(revstates[i + 1] & RS_REACHABLE)) {
				revstates[i + 1] |= RS_REACHABLE;
				val = PyInt_FromSsize_t(i);
				if (val == NULL)
					goto bail;
				r = PyList_Append(reachable, val);
				Py_DECREF(val);
				if (r < 0)
					goto bail;
			}
		}
	}

	free(revstates);
	free(tovisit);
	return reachable;
bail:
	Py_XDECREF(reachable);
	free(revstates);
	free(tovisit);
	return NULL;
}
683 683
/*
 * Mark every rev whose node is in ``roots`` with ``phase`` in the
 * ``phases`` array, and return the smallest such rev.
 *
 * Returns -1 when no root resolved to a valid rev, -2 (with an
 * exception set) on error — including nodes that do not exist in the
 * index, since null is implicitly public.
 */
static int add_roots_get_min(indexObject *self, PyObject *roots, char *phases,
                             char phase)
{
	Py_ssize_t len = index_length(self);
	PyObject *item;
	PyObject *iterator;
	int rev, minrev = -1;
	char *node;

	if (!PySet_Check(roots)) {
		PyErr_SetString(PyExc_TypeError,
		                "roots must be a set of nodes");
		return -2;
	}
	iterator = PyObject_GetIter(roots);
	if (iterator == NULL)
		return -2;
	while ((item = PyIter_Next(iterator))) {
		if (node_check(self->nodelen, item, &node) == -1)
			goto failed;
		rev = index_find_node(self, node);
		/* null is implicitly public, so negative is invalid */
		if (rev < 0 || rev >= len)
			goto failed;
		phases[rev] = phase;
		if (minrev == -1 || minrev > rev)
			minrev = rev;
		Py_DECREF(item);
	}
	Py_DECREF(iterator);
	return minrev;
failed:
	Py_DECREF(iterator);
	Py_DECREF(item);
	return -2;
}
720 720
/*
 * Compute the full phase of every revision from a dict of
 * {phase-number: set-of-root-nodes}, propagating phases from parents
 * to children, and return ``(len, {phase: set-of-revs})`` covering
 * the non-public tracked phases.
 */
static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
{
	/* 0: public (untracked), 1: draft, 2: secret, 32: archive,
	   96: internal */
	static const char trackedphases[] = {1, 2, 32, 96};
	PyObject *roots = Py_None;
	PyObject *phasesetsdict = NULL;
	PyObject *phasesets[4] = {NULL, NULL, NULL, NULL};
	Py_ssize_t len = index_length(self);
	char *phases = NULL;
	int minphaserev = -1, rev, i;
	const int numphases = (int)(sizeof(phasesets) / sizeof(phasesets[0]));

	if (!PyArg_ParseTuple(args, "O", &roots))
		return NULL;
	if (roots == NULL || !PyDict_Check(roots)) {
		PyErr_SetString(PyExc_TypeError, "roots must be a dictionary");
		return NULL;
	}

	/* per-rev phase byte, zero (public) initialized */
	phases = calloc(len, 1);
	if (phases == NULL) {
		PyErr_NoMemory();
		return NULL;
	}

	/* seed the phases array from the per-phase root sets, remembering
	 * the lowest rev touched so the sweep below can start there */
	for (i = 0; i < numphases; ++i) {
		PyObject *pyphase = PyInt_FromLong(trackedphases[i]);
		PyObject *phaseroots = NULL;
		if (pyphase == NULL)
			goto release;
		phaseroots = PyDict_GetItem(roots, pyphase);
		Py_DECREF(pyphase);
		if (phaseroots == NULL)
			continue;
		rev = add_roots_get_min(self, phaseroots, phases,
		                        trackedphases[i]);
		if (rev == -2)
			goto release;
		if (rev != -1 && (minphaserev == -1 || rev < minphaserev))
			minphaserev = rev;
	}

	for (i = 0; i < numphases; ++i) {
		phasesets[i] = PySet_New(NULL);
		if (phasesets[i] == NULL)
			goto release;
	}

	if (minphaserev == -1)
		minphaserev = len;
	for (rev = minphaserev; rev < len; ++rev) {
		PyObject *pyphase = NULL;
		PyObject *pyrev = NULL;
		int parents[2];
		/*
		 * The parent lookup could be skipped for phaseroots, but
		 * phase --force would historically not recompute them
		 * correctly, leaving descendents with a lower phase around.
		 * As such, unconditionally recompute the phase.
		 */
		if (index_get_parents(self, rev, parents, (int)len - 1) < 0)
			goto release;
		set_phase_from_parents(phases, parents[0], parents[1], rev);
		switch (phases[rev]) {
		case 0:
			continue;
		case 1:
			pyphase = phasesets[0];
			break;
		case 2:
			pyphase = phasesets[1];
			break;
		case 32:
			pyphase = phasesets[2];
			break;
		case 96:
			pyphase = phasesets[3];
			break;
		default:
			/* this should never happen since the phase number is
			 * specified by this function. */
			PyErr_SetString(PyExc_SystemError,
			                "bad phase number in internal list");
			goto release;
		}
		pyrev = PyInt_FromLong(rev);
		if (pyrev == NULL)
			goto release;
		if (PySet_Add(pyphase, pyrev) == -1) {
			Py_DECREF(pyrev);
			goto release;
		}
		Py_DECREF(pyrev);
	}

	/* hand the per-phase sets over to the result dict */
	phasesetsdict = _dict_new_presized(numphases);
	if (phasesetsdict == NULL)
		goto release;
	for (i = 0; i < numphases; ++i) {
		PyObject *pyphase = PyInt_FromLong(trackedphases[i]);
		if (pyphase == NULL)
			goto release;
		if (PyDict_SetItem(phasesetsdict, pyphase, phasesets[i]) ==
		    -1) {
			Py_DECREF(pyphase);
			goto release;
		}
		Py_DECREF(phasesets[i]);
		phasesets[i] = NULL;
	}

	return Py_BuildValue("nN", len, phasesetsdict);

release:
	for (i = 0; i < numphases; ++i)
		Py_XDECREF(phasesets[i]);
	Py_XDECREF(phasesetsdict);

	free(phases);
	return NULL;
}
843 843
/*
 * Return the list of head revisions (revs with no unfiltered child),
 * honoring an optional ``filteredrevs`` container argument.
 *
 * The result is cached on self->headrevs, keyed by identity of the
 * filteredrevs object; callers always receive a fresh copy so the
 * cache cannot be mutated.
 */
static PyObject *index_headrevs(indexObject *self, PyObject *args)
{
	Py_ssize_t i, j, len;
	char *nothead = NULL;
	PyObject *heads = NULL;
	PyObject *filter = NULL;
	PyObject *filteredrevs = Py_None;

	if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
		return NULL;
	}

	/* cache hit: same filter object as last time */
	if (self->headrevs && filteredrevs == self->filteredrevs)
		return list_copy(self->headrevs);

	Py_DECREF(self->filteredrevs);
	self->filteredrevs = filteredrevs;
	Py_INCREF(filteredrevs);

	if (filteredrevs != Py_None) {
		filter = PyObject_GetAttrString(filteredrevs, "__contains__");
		if (!filter) {
			PyErr_SetString(
			    PyExc_TypeError,
			    "filteredrevs has no attribute __contains__");
			goto bail;
		}
	}

	len = index_length(self);
	heads = PyList_New(0);
	if (heads == NULL)
		goto bail;
	if (len == 0) {
		/* empty repo: nullrev is the only head */
		PyObject *nullid = PyInt_FromLong(-1);
		if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
			Py_XDECREF(nullid);
			goto bail;
		}
		goto done;
	}

	nothead = calloc(len, 1);
	if (nothead == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* walk from tip to root, marking parents of unfiltered revs */
	for (i = len - 1; i >= 0; i--) {
		int isfiltered;
		int parents[2];

		/* If nothead[i] == 1, it means we've seen an unfiltered child
		 * of this node already, and therefore this node is not
		 * filtered. So we can skip the expensive check_filter step.
		 */
		if (nothead[i] != 1) {
			isfiltered = check_filter(filter, i);
			if (isfiltered == -1) {
				PyErr_SetString(PyExc_TypeError,
				                "unable to check filter");
				goto bail;
			}

			if (isfiltered) {
				nothead[i] = 1;
				continue;
			}
		}

		if (index_get_parents(self, i, parents, (int)len - 1) < 0)
			goto bail;
		for (j = 0; j < 2; j++) {
			if (parents[j] >= 0)
				nothead[parents[j]] = 1;
		}
	}

	/* anything not marked as a non-head is a head */
	for (i = 0; i < len; i++) {
		PyObject *head;

		if (nothead[i])
			continue;
		head = PyInt_FromSsize_t(i);
		if (head == NULL || PyList_Append(heads, head) == -1) {
			Py_XDECREF(head);
			goto bail;
		}
	}

done:
	self->headrevs = heads;
	Py_XDECREF(filter);
	free(nothead);
	return list_copy(self->headrevs);
bail:
	Py_XDECREF(filter);
	Py_XDECREF(heads);
	free(nothead);
	return NULL;
}
945 945
/**
 * Obtain the base revision index entry.
 *
 * Callers must ensure that rev >= 0 or illegal memory access may occur.
 *
 * Returns the base revision, or -2 with an exception set if the entry
 * cannot be read or the stored base is out of the valid range
 * [-1, rev].
 */
static inline int index_baserev(indexObject *self, int rev)
{
	const char *data;
	int result;

	data = index_deref(self, rev);
	if (data == NULL)
		return -2;
	/* base revision field lives at byte offset 16 of a v1 record */
	result = getbe32(data + 16);

	if (result > rev) {
		PyErr_Format(
		    PyExc_ValueError,
		    "corrupted revlog, revision base above revision: %d, %d",
		    rev, result);
		return -2;
	}
	if (result < -1) {
		PyErr_Format(
		    PyExc_ValueError,
		    "corrupted revlog, revision base out of range: %d, %d", rev,
		    result);
		return -2;
	}
	return result;
}
977 977
/**
 * Find if a revision is a snapshot or not
 *
 * Only relevant for sparse-revlog case.
 * Callers must ensure that rev is in a valid range.
 *
 * Returns 1 if ``rev`` is a snapshot, 0 if not, -1 on error (with an
 * exception set). Walks the delta chain: a rev is a snapshot when its
 * base is not one of its parents, all the way down to a full-text
 * base (-1).
 */
static int index_issnapshotrev(indexObject *self, Py_ssize_t rev)
{
	int ps[2];
	Py_ssize_t base;
	while (rev >= 0) {
		base = (Py_ssize_t)index_baserev(self, rev);
		/* a rev that is its own base delta-compresses against
		 * nothing, i.e. is a full snapshot */
		if (base == rev) {
			base = -1;
		}
		if (base == -2) {
			assert(PyErr_Occurred());
			return -1;
		}
		if (base == -1) {
			return 1;
		}
		if (index_get_parents(self, rev, ps, (int)rev) < 0) {
			assert(PyErr_Occurred());
			return -1;
		};
		/* a delta against a parent is a regular delta, not a
		 * (intermediate) snapshot */
		if (base == ps[0] || base == ps[1]) {
			return 0;
		}
		rev = base;
	}
	return rev == -1;
}
1011 1011
/*
 * Python-visible wrapper around index_issnapshotrev: validate the rev
 * argument and return a bool.
 */
static PyObject *index_issnapshot(indexObject *self, PyObject *value)
{
	long rev;
	int issnap;
	Py_ssize_t length = index_length(self);

	if (!pylong_to_long(value, &rev)) {
		return NULL;
	}
	if (rev < -1 || rev >= length) {
		PyErr_Format(PyExc_ValueError, "revlog index out of range: %ld",
		             rev);
		return NULL;
	};
	issnap = index_issnapshotrev(self, (Py_ssize_t)rev);
	if (issnap < 0) {
		return NULL;
	};
	return PyBool_FromLong((long)issnap);
}
1032 1032
1033 1033 static PyObject *index_findsnapshots(indexObject *self, PyObject *args)
1034 1034 {
1035 1035 Py_ssize_t start_rev;
1036 1036 PyObject *cache;
1037 1037 Py_ssize_t base;
1038 1038 Py_ssize_t rev;
1039 1039 PyObject *key = NULL;
1040 1040 PyObject *value = NULL;
1041 1041 const Py_ssize_t length = index_length(self);
1042 1042 if (!PyArg_ParseTuple(args, "O!n", &PyDict_Type, &cache, &start_rev)) {
1043 1043 return NULL;
1044 1044 }
1045 1045 for (rev = start_rev; rev < length; rev++) {
1046 1046 int issnap;
1047 1047 PyObject *allvalues = NULL;
1048 1048 issnap = index_issnapshotrev(self, rev);
1049 1049 if (issnap < 0) {
1050 1050 goto bail;
1051 1051 }
1052 1052 if (issnap == 0) {
1053 1053 continue;
1054 1054 }
1055 1055 base = (Py_ssize_t)index_baserev(self, rev);
1056 1056 if (base == rev) {
1057 1057 base = -1;
1058 1058 }
1059 1059 if (base == -2) {
1060 1060 assert(PyErr_Occurred());
1061 1061 goto bail;
1062 1062 }
1063 1063 key = PyInt_FromSsize_t(base);
1064 1064 allvalues = PyDict_GetItem(cache, key);
1065 1065 if (allvalues == NULL && PyErr_Occurred()) {
1066 1066 goto bail;
1067 1067 }
1068 1068 if (allvalues == NULL) {
1069 1069 int r;
1070 1070 allvalues = PyList_New(0);
1071 1071 if (!allvalues) {
1072 1072 goto bail;
1073 1073 }
1074 1074 r = PyDict_SetItem(cache, key, allvalues);
1075 1075 Py_DECREF(allvalues);
1076 1076 if (r < 0) {
1077 1077 goto bail;
1078 1078 }
1079 1079 }
1080 1080 value = PyInt_FromSsize_t(rev);
1081 1081 if (PyList_Append(allvalues, value)) {
1082 1082 goto bail;
1083 1083 }
1084 1084 Py_CLEAR(key);
1085 1085 Py_CLEAR(value);
1086 1086 }
1087 1087 Py_RETURN_NONE;
1088 1088 bail:
1089 1089 Py_XDECREF(key);
1090 1090 Py_XDECREF(value);
1091 1091 return NULL;
1092 1092 }
1093 1093
1094 1094 static PyObject *index_deltachain(indexObject *self, PyObject *args)
1095 1095 {
1096 1096 int rev, generaldelta;
1097 1097 PyObject *stoparg;
1098 1098 int stoprev, iterrev, baserev = -1;
1099 1099 int stopped;
1100 1100 PyObject *chain = NULL, *result = NULL;
1101 1101 const Py_ssize_t length = index_length(self);
1102 1102
1103 1103 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
1104 1104 return NULL;
1105 1105 }
1106 1106
1107 1107 if (PyInt_Check(stoparg)) {
1108 1108 stoprev = (int)PyInt_AsLong(stoparg);
1109 1109 if (stoprev == -1 && PyErr_Occurred()) {
1110 1110 return NULL;
1111 1111 }
1112 1112 } else if (stoparg == Py_None) {
1113 1113 stoprev = -2;
1114 1114 } else {
1115 1115 PyErr_SetString(PyExc_ValueError,
1116 1116 "stoprev must be integer or None");
1117 1117 return NULL;
1118 1118 }
1119 1119
1120 1120 if (rev < 0 || rev >= length) {
1121 1121 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1122 1122 return NULL;
1123 1123 }
1124 1124
1125 1125 chain = PyList_New(0);
1126 1126 if (chain == NULL) {
1127 1127 return NULL;
1128 1128 }
1129 1129
1130 1130 baserev = index_baserev(self, rev);
1131 1131
1132 1132 /* This should never happen. */
1133 1133 if (baserev <= -2) {
1134 1134 /* Error should be set by index_deref() */
1135 1135 assert(PyErr_Occurred());
1136 1136 goto bail;
1137 1137 }
1138 1138
1139 1139 iterrev = rev;
1140 1140
1141 1141 while (iterrev != baserev && iterrev != stoprev) {
1142 1142 PyObject *value = PyInt_FromLong(iterrev);
1143 1143 if (value == NULL) {
1144 1144 goto bail;
1145 1145 }
1146 1146 if (PyList_Append(chain, value)) {
1147 1147 Py_DECREF(value);
1148 1148 goto bail;
1149 1149 }
1150 1150 Py_DECREF(value);
1151 1151
1152 1152 if (generaldelta) {
1153 1153 iterrev = baserev;
1154 1154 } else {
1155 1155 iterrev--;
1156 1156 }
1157 1157
1158 1158 if (iterrev < 0) {
1159 1159 break;
1160 1160 }
1161 1161
1162 1162 if (iterrev >= length) {
1163 1163 PyErr_SetString(PyExc_IndexError,
1164 1164 "revision outside index");
1165 1165 return NULL;
1166 1166 }
1167 1167
1168 1168 baserev = index_baserev(self, iterrev);
1169 1169
1170 1170 /* This should never happen. */
1171 1171 if (baserev <= -2) {
1172 1172 /* Error should be set by index_deref() */
1173 1173 assert(PyErr_Occurred());
1174 1174 goto bail;
1175 1175 }
1176 1176 }
1177 1177
1178 1178 if (iterrev == stoprev) {
1179 1179 stopped = 1;
1180 1180 } else {
1181 1181 PyObject *value = PyInt_FromLong(iterrev);
1182 1182 if (value == NULL) {
1183 1183 goto bail;
1184 1184 }
1185 1185 if (PyList_Append(chain, value)) {
1186 1186 Py_DECREF(value);
1187 1187 goto bail;
1188 1188 }
1189 1189 Py_DECREF(value);
1190 1190
1191 1191 stopped = 0;
1192 1192 }
1193 1193
1194 1194 if (PyList_Reverse(chain)) {
1195 1195 goto bail;
1196 1196 }
1197 1197
1198 1198 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
1199 1199 Py_DECREF(chain);
1200 1200 return result;
1201 1201
1202 1202 bail:
1203 1203 Py_DECREF(chain);
1204 1204 return NULL;
1205 1205 }
1206 1206
1207 1207 static inline int64_t
1208 1208 index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
1209 1209 {
1210 1210 int64_t start_offset;
1211 1211 int64_t end_offset;
1212 1212 int end_size;
1213 1213 start_offset = index_get_start(self, start_rev);
1214 1214 if (start_offset < 0) {
1215 1215 return -1;
1216 1216 }
1217 1217 end_offset = index_get_start(self, end_rev);
1218 1218 if (end_offset < 0) {
1219 1219 return -1;
1220 1220 }
1221 1221 end_size = index_get_length(self, end_rev);
1222 1222 if (end_size < 0) {
1223 1223 return -1;
1224 1224 }
1225 1225 if (end_offset < start_offset) {
1226 1226 PyErr_Format(PyExc_ValueError,
1227 1227 "corrupted revlog index: inconsistent offset "
1228 1228 "between revisions (%zd) and (%zd)",
1229 1229 start_rev, end_rev);
1230 1230 return -1;
1231 1231 }
1232 1232 return (end_offset - start_offset) + (int64_t)end_size;
1233 1233 }
1234 1234
1235 1235 /* returns endidx so that revs[startidx:endidx] has no empty trailing revs */
1236 1236 static Py_ssize_t trim_endidx(indexObject *self, const Py_ssize_t *revs,
1237 1237 Py_ssize_t startidx, Py_ssize_t endidx)
1238 1238 {
1239 1239 int length;
1240 1240 while (endidx > 1 && endidx > startidx) {
1241 1241 length = index_get_length(self, revs[endidx - 1]);
1242 1242 if (length < 0) {
1243 1243 return -1;
1244 1244 }
1245 1245 if (length != 0) {
1246 1246 break;
1247 1247 }
1248 1248 endidx -= 1;
1249 1249 }
1250 1250 return endidx;
1251 1251 }
1252 1252
/* A hole between two chunks of revision data in the revlog file,
 * recorded as its byte size plus the position (in the revs array) of
 * the revision that follows it. */
struct Gap {
	int64_t size;
	Py_ssize_t idx;
};
1257 1257
1258 1258 static int gap_compare(const void *left, const void *right)
1259 1259 {
1260 1260 const struct Gap *l_left = ((const struct Gap *)left);
1261 1261 const struct Gap *l_right = ((const struct Gap *)right);
1262 1262 if (l_left->size < l_right->size) {
1263 1263 return -1;
1264 1264 } else if (l_left->size > l_right->size) {
1265 1265 return 1;
1266 1266 }
1267 1267 return 0;
1268 1268 }
1269 1269 static int Py_ssize_t_compare(const void *left, const void *right)
1270 1270 {
1271 1271 const Py_ssize_t l_left = *(const Py_ssize_t *)left;
1272 1272 const Py_ssize_t l_right = *(const Py_ssize_t *)right;
1273 1273 if (l_left < l_right) {
1274 1274 return -1;
1275 1275 } else if (l_left > l_right) {
1276 1276 return 1;
1277 1277 }
1278 1278 return 0;
1279 1279 }
1280 1280
/*
 * slicechunktodensity(revs, targetdensity, mingapsize) -> slices
 *
 * Partition the delta chain `revs` (a list of revision numbers) into
 * slices that are dense enough to be read efficiently: within a slice
 * the ratio of payload bytes to bytes read should reach
 * `targetdensity`, and gaps of `mingapsize` bytes or fewer are never
 * worth splitting on. Returns a 1-tuple (revs,) when no slicing is
 * needed, otherwise a list of sub-lists of `revs`.
 */
static PyObject *index_slicechunktodensity(indexObject *self, PyObject *args)
{
	/* method arguments */
	PyObject *list_revs = NULL; /* revisions in the chain */
	double targetdensity = 0;   /* min density to achieve */
	Py_ssize_t mingapsize = 0;  /* threshold to ignore gaps */

	/* other core variables */
	Py_ssize_t idxlen = index_length(self);
	Py_ssize_t i;            /* used for various iteration */
	PyObject *result = NULL; /* the final return of the function */

	/* generic information about the delta chain being slice */
	Py_ssize_t num_revs = 0;    /* size of the full delta chain */
	Py_ssize_t *revs = NULL;    /* native array of revision in the chain */
	int64_t chainpayload = 0;   /* sum of all delta in the chain */
	int64_t deltachainspan = 0; /* distance from first byte to last byte */

	/* variable used for slicing the delta chain */
	int64_t readdata = 0; /* amount of data currently planned to be read */
	double density = 0;   /* ration of payload data compared to read ones */
	int64_t previous_end;
	struct Gap *gaps = NULL; /* array of notable gap in the chain */
	Py_ssize_t num_gaps =
	    0; /* total number of notable gap recorded so far */
	Py_ssize_t *selected_indices = NULL; /* indices of gap skipped over */
	Py_ssize_t num_selected = 0;         /* number of gaps skipped */
	PyObject *chunk = NULL;              /* individual slice */
	PyObject *allchunks = NULL;          /* all slices */
	Py_ssize_t previdx;

	/* parsing argument */
	if (!PyArg_ParseTuple(args, "O!dn", &PyList_Type, &list_revs,
	                      &targetdensity, &mingapsize)) {
		goto bail;
	}

	/* If the delta chain contains a single element, we do not need slicing
	 */
	num_revs = PyList_GET_SIZE(list_revs);
	if (num_revs <= 1) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* Turn the python list into a native integer array (for efficiency) */
	revs = (Py_ssize_t *)calloc(num_revs, sizeof(Py_ssize_t));
	if (revs == NULL) {
		PyErr_NoMemory();
		goto bail;
	}
	for (i = 0; i < num_revs; i++) {
		Py_ssize_t revnum = PyInt_AsLong(PyList_GET_ITEM(list_revs, i));
		if (revnum == -1 && PyErr_Occurred()) {
			goto bail;
		}
		if (revnum < nullrev || revnum >= idxlen) {
			PyErr_Format(PyExc_IndexError,
			             "index out of range: %zd", revnum);
			goto bail;
		}
		revs[i] = revnum;
	}

	/* Compute and check various property of the unsliced delta chain */
	deltachainspan = index_segment_span(self, revs[0], revs[num_revs - 1]);
	if (deltachainspan < 0) {
		goto bail;
	}

	/* a chain smaller than the gap threshold is never worth slicing */
	if (deltachainspan <= mingapsize) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	chainpayload = 0;
	for (i = 0; i < num_revs; i++) {
		int tmp = index_get_length(self, revs[i]);
		if (tmp < 0) {
			goto bail;
		}
		chainpayload += tmp;
	}

	readdata = deltachainspan;
	density = 1.0;

	if (0 < deltachainspan) {
		density = (double)chainpayload / (double)deltachainspan;
	}

	/* already dense enough: keep the chain whole */
	if (density >= targetdensity) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}

	/* if chain is too sparse, look for relevant gaps */
	gaps = (struct Gap *)calloc(num_revs, sizeof(struct Gap));
	if (gaps == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	previous_end = -1;
	for (i = 0; i < num_revs; i++) {
		int64_t revstart;
		int revsize;
		revstart = index_get_start(self, revs[i]);
		if (revstart < 0) {
			goto bail;
		};
		revsize = index_get_length(self, revs[i]);
		if (revsize < 0) {
			goto bail;
		};
		if (revsize == 0) {
			continue;
		}
		if (previous_end >= 0) {
			int64_t gapsize = revstart - previous_end;
			if (gapsize > mingapsize) {
				gaps[num_gaps].size = gapsize;
				gaps[num_gaps].idx = i;
				num_gaps += 1;
			}
		}
		previous_end = revstart + revsize;
	}
	if (num_gaps == 0) {
		result = PyTuple_Pack(1, list_revs);
		goto done;
	}
	qsort(gaps, num_gaps, sizeof(struct Gap), &gap_compare);

	/* Slice the largest gap first, they improve the density the most */
	selected_indices =
	    (Py_ssize_t *)malloc((num_gaps + 1) * sizeof(Py_ssize_t));
	if (selected_indices == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	for (i = num_gaps - 1; i >= 0; i--) {
		selected_indices[num_selected] = gaps[i].idx;
		readdata -= gaps[i].size;
		num_selected += 1;
		if (readdata <= 0) {
			density = 1.0;
		} else {
			density = (double)chainpayload / (double)readdata;
		}
		if (density >= targetdensity) {
			break;
		}
	}
	/* restore cut points to chain order before emitting slices */
	qsort(selected_indices, num_selected, sizeof(Py_ssize_t),
	      &Py_ssize_t_compare);

	/* create the resulting slice */
	allchunks = PyList_New(0);
	if (allchunks == NULL) {
		goto bail;
	}
	previdx = 0;
	selected_indices[num_selected] = num_revs;
	for (i = 0; i <= num_selected; i++) {
		Py_ssize_t idx = selected_indices[i];
		Py_ssize_t endidx = trim_endidx(self, revs, previdx, idx);
		if (endidx < 0) {
			goto bail;
		}
		if (previdx < endidx) {
			chunk = PyList_GetSlice(list_revs, previdx, endidx);
			if (chunk == NULL) {
				goto bail;
			}
			if (PyList_Append(allchunks, chunk) == -1) {
				goto bail;
			}
			Py_DECREF(chunk);
			chunk = NULL;
		}
		previdx = idx;
	}
	result = allchunks;
	goto done;

bail:
	Py_XDECREF(allchunks);
	Py_XDECREF(chunk);
done:
	free(revs);
	free(gaps);
	free(selected_indices);
	return result;
}
1476 1476
1477 1477 static inline int nt_level(const char *node, Py_ssize_t level)
1478 1478 {
1479 1479 int v = node[level >> 1];
1480 1480 if (!(level & 1))
1481 1481 v >>= 4;
1482 1482 return v & 0xf;
1483 1483 }
1484 1484
/*
 * Return values:
 *
 * -4: match is ambiguous (multiple candidates)
 * -2: not found
 * -1: nullid (binary input fast path)
 * rest: valid rev
 */
static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
                   int hex)
{
	/* `hex` selects how nybbles are read from `node`: as hex digits or
	 * as the raw halves of each binary byte */
	int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
	int level, maxlevel, off;

	/* If the input is binary, do a fast check for the nullid first. */
	if (!hex && nodelen == self->nodelen && node[0] == '\0' &&
	    node[1] == '\0' && memcmp(node, nullid, self->nodelen) == 0)
		return -1;

	if (hex)
		maxlevel = nodelen;
	else
		maxlevel = 2 * nodelen;
	if (maxlevel > 2 * self->nodelen)
		maxlevel = 2 * self->nodelen;

	for (level = off = 0; level < maxlevel; level++) {
		int k = getnybble(node, level);
		nodetreenode *n = &self->nodes[off];
		int v = n->children[k];

		if (v < 0) {
			/* negative child is a leaf encoding rev -(v + 2);
			 * verify the remaining nybbles really match it */
			const char *n;
			Py_ssize_t i;

			v = -(v + 2);
			n = index_node(self->index, v);
			if (n == NULL)
				return -2;
			for (i = level; i < maxlevel; i++)
				if (getnybble(node, i) != nt_level(n, i))
					return -2;
			return v;
		}
		if (v == 0)
			return -2;
		/* positive child: descend to the inner node it points at */
		off = v;
	}
	/* multiple matches against an ambiguous prefix */
	return -4;
}
1535 1535
/*
 * Allocate a fresh zeroed node in the tree, growing the backing array
 * geometrically when full.
 *
 * Returns the index of the new node, or -1 on error (exception set).
 */
static int nt_new(nodetree *self)
{
	if (self->length == self->capacity) {
		unsigned newcapacity;
		nodetreenode *newnodes;
		newcapacity = self->capacity * 2;
		/* guard the size multiplication below against overflow */
		if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
			PyErr_SetString(PyExc_MemoryError,
			                "overflow in nt_new");
			return -1;
		}
		newnodes =
		    realloc(self->nodes, newcapacity * sizeof(nodetreenode));
		if (newnodes == NULL) {
			PyErr_SetString(PyExc_MemoryError, "out of memory");
			return -1;
		}
		self->capacity = newcapacity;
		self->nodes = newnodes;
		/* zero the new tail so children[] slots read as empty */
		memset(&self->nodes[self->length], 0,
		       sizeof(nodetreenode) * (self->capacity - self->length));
	}
	return self->length++;
}
1560 1560
/*
 * Insert `node` (binary nodeid of `rev`) into the trie.
 *
 * Leaves are encoded as -rev - 2 so that 0 can mean "empty slot".
 * When a new node collides with an existing leaf on a shared prefix,
 * an inner node is created and the old leaf is pushed one level down.
 *
 * Returns 0 on success, -1 on error (exception set).
 */
static int nt_insert(nodetree *self, const char *node, int rev)
{
	int level = 0;
	int off = 0;

	while (level < 2 * self->nodelen) {
		int k = nt_level(node, level);
		nodetreenode *n;
		int v;

		n = &self->nodes[off];
		v = n->children[k];

		if (v == 0) {
			/* empty slot: store the leaf here */
			n->children[k] = -rev - 2;
			return 0;
		}
		if (v < 0) {
			/* slot holds another leaf: split unless it is the
			 * very same node being re-inserted */
			const char *oldnode =
			    index_node_existing(self->index, -(v + 2));
			int noff;

			if (oldnode == NULL)
				return -1;
			if (!memcmp(oldnode, node, self->nodelen)) {
				n->children[k] = -rev - 2;
				return 0;
			}
			noff = nt_new(self);
			if (noff == -1)
				return -1;
			/* self->nodes may have been changed by realloc */
			self->nodes[off].children[k] = noff;
			off = noff;
			n = &self->nodes[off];
			/* push the displaced leaf one level deeper; the loop
			 * then retries inserting `node` at that level */
			n->children[nt_level(oldnode, ++level)] = v;
			if (level > self->depth)
				self->depth = level;
			self->splits += 1;
		} else {
			/* inner node: descend */
			level += 1;
			off = v;
		}
	}

	return -1;
}
1608 1608
1609 1609 static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
1610 1610 {
1611 1611 Py_ssize_t rev;
1612 1612 const char *node;
1613 1613 Py_ssize_t length;
1614 1614 if (!PyArg_ParseTuple(args, "n", &rev))
1615 1615 return NULL;
1616 1616 length = index_length(self->nt.index);
1617 1617 if (rev < 0 || rev >= length) {
1618 1618 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1619 1619 return NULL;
1620 1620 }
1621 1621 node = index_node_existing(self->nt.index, rev);
1622 1622 if (nt_insert(&self->nt, node, (int)rev) == -1)
1623 1623 return NULL;
1624 1624 Py_RETURN_NONE;
1625 1625 }
1626 1626
1627 1627 static int nt_delete_node(nodetree *self, const char *node)
1628 1628 {
1629 1629 /* rev==-2 happens to get encoded as 0, which is interpreted as not set
1630 1630 */
1631 1631 return nt_insert(self, node, -2);
1632 1632 }
1633 1633
/*
 * Initialize a nodetree over `index`. `capacity` is a hint expressed in
 * revisions; the node array grows on demand (see nt_new()).
 *
 * NB: does not INCREF `index`; the caller owns that reference (see
 * ntobj_init()).
 *
 * Returns 0 on success, -1 on error (exception set).
 */
static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
{
	/* Initialize before overflow-checking to avoid nt_dealloc() crash. */
	self->nodes = NULL;

	self->index = index;
	/* The input capacity is in terms of revisions, while the field is in
	 * terms of nodetree nodes. */
	self->capacity = (capacity < 4 ? 4 : capacity / 2);
	self->nodelen = index->nodelen;
	self->depth = 0;
	self->splits = 0;
	if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
		PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
		return -1;
	}
	self->nodes = calloc(self->capacity, sizeof(nodetreenode));
	if (self->nodes == NULL) {
		PyErr_NoMemory();
		return -1;
	}
	/* node 0 is the (pre-allocated) root */
	self->length = 1;
	return 0;
}
1658 1658
1659 1659 static int ntobj_init(nodetreeObject *self, PyObject *args)
1660 1660 {
1661 1661 PyObject *index;
1662 1662 unsigned capacity;
1663 1663 if (!PyArg_ParseTuple(args, "O!I", &HgRevlogIndex_Type, &index,
1664 1664 &capacity))
1665 1665 return -1;
1666 1666 Py_INCREF(index);
1667 1667 return nt_init(&self->nt, (indexObject *)index, capacity);
1668 1668 }
1669 1669
1670 1670 static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
1671 1671 {
1672 1672 return nt_find(self, node, nodelen, 1);
1673 1673 }
1674 1674
/*
 * Find the length of the shortest unique prefix of node.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: length of shortest prefix
 */
static int nt_shortest(nodetree *self, const char *node)
{
	int level, off;

	for (level = off = 0; level < 2 * self->nodelen; level++) {
		int k, v;
		nodetreenode *n = &self->nodes[off];
		k = nt_level(node, level);
		v = n->children[k];
		if (v < 0) {
			/* leaf reached: the prefix is unique at this depth;
			 * check it belongs to the requested node */
			const char *n;
			v = -(v + 2);
			n = index_node_existing(self->index, v);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, self->nodelen) != 0)
				/*
				 * Found a unique prefix, but it wasn't for the
				 * requested node (i.e the requested node does
				 * not exist).
				 */
				return -2;
			return level + 1;
		}
		if (v == 0)
			return -2;
		off = v;
	}
	/*
	 * The node was still not unique after 40 hex digits, so this won't
	 * happen. Also, if we get here, then there's a programming error in
	 * this file that made us insert a node longer than 40 hex digits.
	 */
	PyErr_SetString(PyExc_Exception, "broken node tree");
	return -3;
}
1720 1720
1721 1721 static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
1722 1722 {
1723 1723 PyObject *val;
1724 1724 char *node;
1725 1725 int length;
1726 1726
1727 1727 if (!PyArg_ParseTuple(args, "O", &val))
1728 1728 return NULL;
1729 1729 if (node_check(self->nt.nodelen, val, &node) == -1)
1730 1730 return NULL;
1731 1731
1732 1732 length = nt_shortest(&self->nt, node);
1733 1733 if (length == -3)
1734 1734 return NULL;
1735 1735 if (length == -2) {
1736 1736 raise_revlog_error();
1737 1737 return NULL;
1738 1738 }
1739 1739 return PyInt_FromLong(length);
1740 1740 }
1741 1741
1742 1742 static void nt_dealloc(nodetree *self)
1743 1743 {
1744 1744 free(self->nodes);
1745 1745 self->nodes = NULL;
1746 1746 }
1747 1747
static void ntobj_dealloc(nodetreeObject *self)
{
	/* drop the index reference taken in ntobj_init() */
	Py_XDECREF(self->nt.index);
	nt_dealloc(&self->nt);
	PyObject_Del(self);
}
1754 1754
/* Methods exposed on parsers.nodetree instances */
static PyMethodDef ntobj_methods[] = {
    {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
     "insert an index entry"},
    {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
     "find length of shortest hex nodeid of a binary ID"},
    {NULL} /* Sentinel */
};
1762 1762
/* Python type object for parsers.nodetree (standalone node->rev trie) */
static PyTypeObject nodetreeType = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.nodetree",            /* tp_name */
    sizeof(nodetreeObject),        /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)ntobj_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    0,                             /* tp_as_sequence */
    0,                             /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "nodetree",                    /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    ntobj_methods,                 /* tp_methods */
    0,                             /* tp_members */
    0,                             /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)ntobj_init,          /* tp_init */
    0,                             /* tp_alloc */
};
1802 1802
/*
 * Lazily create the index's embedded node tree and seed it with nullid.
 * Idempotent. Returns 0 on success, -1 on error (exception set).
 */
static int index_init_nt(indexObject *self)
{
	if (!self->ntinitialized) {
		if (nt_init(&self->nt, self, (int)self->length) == -1) {
			nt_dealloc(&self->nt);
			return -1;
		}
		if (nt_insert(&self->nt, nullid, -1) == -1) {
			nt_dealloc(&self->nt);
			return -1;
		}
		self->ntinitialized = 1;
		/* revisions below ntrev are not in the tree yet; they get
		 * inserted lazily (see index_find_node()) */
		self->ntrev = (int)index_length(self);
		self->ntlookups = 1;
		self->ntmisses = 0;
	}
	return 0;
}
1821 1821
/*
 * Map a binary nodeid to a revision, filling the node tree lazily.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: valid rev
 */
static int index_find_node(indexObject *self, const char *node)
{
	int rev;

	if (index_init_nt(self) == -1)
		return -3;

	self->ntlookups++;
	rev = nt_find(&self->nt, node, self->nodelen, 0);
	if (rev >= -1)
		return rev;

	/*
	 * For the first handful of lookups, we scan the entire index,
	 * and cache only the matching nodes. This optimizes for cases
	 * like "hg tip", where only a few nodes are accessed.
	 *
	 * After that, we cache every node we visit, using a single
	 * scan amortized over multiple lookups. This gives the best
	 * bulk performance, e.g. for "hg log".
	 */
	if (self->ntmisses++ < 4) {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, self->nodelen) == 0) {
				if (nt_insert(&self->nt, n, rev) == -1)
					return -3;
				break;
			}
		}
	} else {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (nt_insert(&self->nt, n, rev) == -1) {
				/* everything >= rev + 1 has been cached */
				self->ntrev = rev + 1;
				return -3;
			}
			if (memcmp(node, n, self->nodelen) == 0) {
				break;
			}
		}
		/* everything >= ntrev is now in the tree */
		self->ntrev = rev;
	}

	if (rev >= 0)
		return rev;
	return -2;
}
1881 1881
1882 1882 static PyObject *index_getitem(indexObject *self, PyObject *value)
1883 1883 {
1884 1884 char *node;
1885 1885 int rev;
1886 1886
1887 1887 if (PyInt_Check(value)) {
1888 1888 long idx;
1889 1889 if (!pylong_to_long(value, &idx)) {
1890 1890 return NULL;
1891 1891 }
1892 1892 return index_get(self, idx);
1893 1893 }
1894 1894
1895 1895 if (node_check(self->nodelen, value, &node) == -1)
1896 1896 return NULL;
1897 1897 rev = index_find_node(self, node);
1898 1898 if (rev >= -1)
1899 1899 return PyInt_FromLong(rev);
1900 1900 if (rev == -2)
1901 1901 raise_revlog_error();
1902 1902 return NULL;
1903 1903 }
1904 1904
1905 1905 /*
1906 1906 * Fully populate the radix tree.
1907 1907 */
1908 1908 static int index_populate_nt(indexObject *self)
1909 1909 {
1910 1910 int rev;
1911 1911 if (self->ntrev > 0) {
1912 1912 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1913 1913 const char *n = index_node_existing(self, rev);
1914 1914 if (n == NULL)
1915 1915 return -1;
1916 1916 if (nt_insert(&self->nt, n, rev) == -1)
1917 1917 return -1;
1918 1918 }
1919 1919 self->ntrev = -1;
1920 1920 }
1921 1921 return 0;
1922 1922 }
1923 1923
/*
 * partialmatch(hexprefix) -> binary nodeid or None
 *
 * Resolve a hex prefix to the full binary node. Returns None when the
 * prefix matches nothing (or contains non-hex characters); raises
 * RevlogError when the prefix is ambiguous.
 */
static PyObject *index_partialmatch(indexObject *self, PyObject *args)
{
	const char *fullnode;
	Py_ssize_t nodelen;
	char *node;
	int rev, i;

	if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
		return NULL;

	if (nodelen < 1) {
		PyErr_SetString(PyExc_ValueError, "key too short");
		return NULL;
	}

	if (nodelen > 2 * self->nodelen) {
		PyErr_SetString(PyExc_ValueError, "key too long");
		return NULL;
	}

	/* validate that every character is a hex digit */
	for (i = 0; i < nodelen; i++)
		hexdigit(node, i);
	if (PyErr_Occurred()) {
		/* input contains non-hex characters */
		PyErr_Clear();
		Py_RETURN_NONE;
	}

	/* prefix lookups need the complete tree */
	if (index_init_nt(self) == -1)
		return NULL;
	if (index_populate_nt(self) == -1)
		return NULL;
	rev = nt_partialmatch(&self->nt, node, nodelen);

	switch (rev) {
	case -4:
		raise_revlog_error();
		return NULL;
	case -2:
		Py_RETURN_NONE;
	case -1:
		return PyBytes_FromStringAndSize(nullid, self->nodelen);
	}

	fullnode = index_node_existing(self, rev);
	if (fullnode == NULL) {
		return NULL;
	}
	return PyBytes_FromStringAndSize(fullnode, self->nodelen);
}
1974 1974
1975 1975 static PyObject *index_shortest(indexObject *self, PyObject *args)
1976 1976 {
1977 1977 PyObject *val;
1978 1978 char *node;
1979 1979 int length;
1980 1980
1981 1981 if (!PyArg_ParseTuple(args, "O", &val))
1982 1982 return NULL;
1983 1983 if (node_check(self->nodelen, val, &node) == -1)
1984 1984 return NULL;
1985 1985
1986 1986 self->ntlookups++;
1987 1987 if (index_init_nt(self) == -1)
1988 1988 return NULL;
1989 1989 if (index_populate_nt(self) == -1)
1990 1990 return NULL;
1991 1991 length = nt_shortest(&self->nt, node);
1992 1992 if (length == -3)
1993 1993 return NULL;
1994 1994 if (length == -2) {
1995 1995 raise_revlog_error();
1996 1996 return NULL;
1997 1997 }
1998 1998 return PyInt_FromLong(length);
1999 1999 }
2000 2000
2001 2001 static PyObject *index_m_get(indexObject *self, PyObject *args)
2002 2002 {
2003 2003 PyObject *val;
2004 2004 char *node;
2005 2005 int rev;
2006 2006
2007 2007 if (!PyArg_ParseTuple(args, "O", &val))
2008 2008 return NULL;
2009 2009 if (node_check(self->nodelen, val, &node) == -1)
2010 2010 return NULL;
2011 2011 rev = index_find_node(self, node);
2012 2012 if (rev == -3)
2013 2013 return NULL;
2014 2014 if (rev == -2)
2015 2015 Py_RETURN_NONE;
2016 2016 return PyInt_FromLong(rev);
2017 2017 }
2018 2018
2019 2019 static int index_contains(indexObject *self, PyObject *value)
2020 2020 {
2021 2021 char *node;
2022 2022
2023 2023 if (PyInt_Check(value)) {
2024 2024 long rev;
2025 2025 if (!pylong_to_long(value, &rev)) {
2026 2026 return -1;
2027 2027 }
2028 2028 return rev >= -1 && rev < index_length(self);
2029 2029 }
2030 2030
2031 2031 if (node_check(self->nodelen, value, &node) == -1)
2032 2032 return -1;
2033 2033
2034 2034 switch (index_find_node(self, node)) {
2035 2035 case -3:
2036 2036 return -1;
2037 2037 case -2:
2038 2038 return 0;
2039 2039 default:
2040 2040 return 1;
2041 2041 }
2042 2042 }
2043 2043
2044 2044 static PyObject *index_m_has_node(indexObject *self, PyObject *args)
2045 2045 {
2046 2046 int ret = index_contains(self, args);
2047 2047 if (ret < 0)
2048 2048 return NULL;
2049 2049 return PyBool_FromLong((long)ret);
2050 2050 }
2051 2051
2052 2052 static PyObject *index_m_rev(indexObject *self, PyObject *val)
2053 2053 {
2054 2054 char *node;
2055 2055 int rev;
2056 2056
2057 2057 if (node_check(self->nodelen, val, &node) == -1)
2058 2058 return NULL;
2059 2059 rev = index_find_node(self, node);
2060 2060 if (rev >= -1)
2061 2061 return PyInt_FromLong(rev);
2062 2062 if (rev == -2)
2063 2063 raise_revlog_error();
2064 2064 return NULL;
2065 2065 }
2066 2066
/* One bit per queried revision plus one "poison" bit; this caps
 * find_gca_candidates() below 64 input revisions. */
typedef uint64_t bitmask;
2068 2068
/*
 * Given a disjoint set of revs, return all candidates for the
 * greatest common ancestor. In revset notation, this is the set
 * "heads(::a and ::b and ...)"
 */
static PyObject *find_gca_candidates(indexObject *self, const int *revs,
                                     int revcount)
{
	/* seen[v] bit i is set once rev v is reachable from input rev i.
	 * allseen == "reachable from every input"; the poison bit marks
	 * already-emitted ancestors so their own ancestors are skipped. */
	const bitmask allseen = (1ull << revcount) - 1;
	const bitmask poison = 1ull << revcount;
	PyObject *gca = PyList_New(0);
	int i, v, interesting;
	int maxrev = -1;
	bitmask sp;
	bitmask *seen;

	if (gca == NULL)
		return PyErr_NoMemory();

	for (i = 0; i < revcount; i++) {
		if (revs[i] > maxrev)
			maxrev = revs[i];
	}

	/* one reachability mask per rev up to the highest input rev */
	seen = calloc(sizeof(*seen), maxrev + 1);
	if (seen == NULL) {
		Py_DECREF(gca);
		return PyErr_NoMemory();
	}

	for (i = 0; i < revcount; i++)
		seen[revs[i]] = 1ull << i;

	/* number of live (unpoisoned) masks still propagating */
	interesting = revcount;

	/* walk from the highest rev toward the root, pushing masks down
	 * to parents; stop early when nothing live remains */
	for (v = maxrev; v >= 0 && interesting; v--) {
		bitmask sv = seen[v];
		int parents[2];

		if (!sv)
			continue;

		if (sv < poison) {
			interesting -= 1;
			if (sv == allseen) {
				/* v is reachable from every input rev:
				 * it is a GCA candidate */
				PyObject *obj = PyInt_FromLong(v);
				if (obj == NULL)
					goto bail;
				if (PyList_Append(gca, obj) == -1) {
					Py_DECREF(obj);
					goto bail;
				}
				sv |= poison;
				/* if v is itself an input rev, no deeper
				 * candidate can exist -- done */
				for (i = 0; i < revcount; i++) {
					if (revs[i] == v)
						goto done;
				}
			}
		}
		if (index_get_parents(self, v, parents, maxrev) < 0)
			goto bail;

		for (i = 0; i < 2; i++) {
			int p = parents[i];
			if (p == -1)
				continue;
			sp = seen[p];
			if (sv < poison) {
				if (sp == 0) {
					seen[p] = sv;
					interesting++;
				} else if (sp != sv)
					seen[p] |= sv;
			} else {
				/* poison overwrites; retire any live mask
				 * the parent was carrying */
				if (sp && sp < poison)
					interesting--;
				seen[p] = sv;
			}
		}
	}

done:
	free(seen);
	return gca;
bail:
	free(seen);
	Py_XDECREF(gca);
	return NULL;
}
2158 2158
/*
 * Given a disjoint set of revs, return the subset with the longest
 * path to the root.
 */
static PyObject *find_deepest(indexObject *self, PyObject *revs)
{
	const Py_ssize_t revcount = PyList_GET_SIZE(revs);
	/* input revs become bits of a long, so the set size is capped */
	static const Py_ssize_t capacity = 24;
	int *depth, *interesting = NULL;
	int i, j, v, ninteresting;
	PyObject *dict = NULL, *keys = NULL;
	long *seen = NULL;
	int maxrev = -1;
	long final;

	if (revcount > capacity) {
		PyErr_Format(PyExc_OverflowError,
		             "bitset size (%ld) > capacity (%ld)",
		             (long)revcount, (long)capacity);
		return NULL;
	}

	for (i = 0; i < revcount; i++) {
		int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
		if (n > maxrev)
			maxrev = n;
	}

	/* depth[v]: longest distance walked from an input rev down to v */
	depth = calloc(sizeof(*depth), maxrev + 1);
	if (depth == NULL)
		return PyErr_NoMemory();

	/* seen[v]: bitmask of the input revs that can reach v */
	seen = calloc(sizeof(*seen), maxrev + 1);
	if (seen == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* interesting[mask]: how many revs currently carry this mask */
	interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
	if (interesting == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	if (PyList_Sort(revs) == -1)
		goto bail;

	for (i = 0; i < revcount; i++) {
		int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
		long b = 1l << i;
		depth[n] = 1;
		seen[n] = b;
		interesting[b] = 1;
	}

	/* invariant: ninteresting is the number of non-zero entries in
	 * interesting. */
	ninteresting = (int)revcount;

	/* sweep from the highest rev toward the root, propagating depth
	 * and reachability masks onto parents */
	for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
		int dv = depth[v];
		int parents[2];
		long sv;

		if (dv == 0)
			continue;

		sv = seen[v];
		if (index_get_parents(self, v, parents, maxrev) < 0)
			goto bail;

		for (i = 0; i < 2; i++) {
			int p = parents[i];
			long sp;
			int dp;

			if (p == -1)
				continue;

			dp = depth[p];
			sp = seen[p];
			if (dp <= dv) {
				/* the path through v is at least as long:
				 * the parent inherits v's depth and mask */
				depth[p] = dv + 1;
				if (sp != sv) {
					interesting[sv] += 1;
					seen[p] = sv;
					if (sp) {
						interesting[sp] -= 1;
						if (interesting[sp] == 0)
							ninteresting -= 1;
					}
				}
			} else if (dv == dp - 1) {
				/* equally deep paths: merge their masks */
				long nsp = sp | sv;
				if (nsp == sp)
					continue;
				seen[p] = nsp;
				interesting[sp] -= 1;
				if (interesting[sp] == 0)
					ninteresting -= 1;
				if (interesting[nsp] == 0)
					ninteresting += 1;
				interesting[nsp] += 1;
			}
		}
		interesting[sv] -= 1;
		if (interesting[sv] == 0)
			ninteresting -= 1;
	}

	/* OR together the masks still alive; their bits name the deepest
	 * input revs.
	 * NOTE(review): the loop bound is 2 << revcount although
	 * `interesting` holds only (1 << revcount) entries; the `j > 0`
	 * guard stops the scan before an out-of-bounds read in practice,
	 * since every non-zero entry lies in the allocated range --
	 * confirm upstream. */
	final = 0;
	j = ninteresting;
	for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
		if (interesting[i] == 0)
			continue;
		final |= i;
		j -= 1;
	}
	if (final == 0) {
		keys = PyList_New(0);
		goto bail;
	}

	dict = PyDict_New();
	if (dict == NULL)
		goto bail;

	/* collect the input revs whose bit survived in `final`; a dict is
	 * used for uniqueness, its keys become the result list */
	for (i = 0; i < revcount; i++) {
		PyObject *key;

		if ((final & (1 << i)) == 0)
			continue;

		key = PyList_GET_ITEM(revs, i);
		Py_INCREF(key);
		Py_INCREF(Py_None);
		if (PyDict_SetItem(dict, key, Py_None) == -1) {
			Py_DECREF(key);
			Py_DECREF(Py_None);
			goto bail;
		}
	}

	keys = PyDict_Keys(dict);

bail:
	free(depth);
	free(seen);
	free(interesting);
	Py_XDECREF(dict);

	return keys;
}
2312 2312
/*
 * Given a (possibly overlapping) set of revs, return all the
 * common ancestors heads: heads(::args[0] and ::a[1] and ...)
 */
static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
{
	PyObject *ret = NULL;
	Py_ssize_t argcount, i, len;
	bitmask repeat = 0; /* 64-bit bloom filter keyed on rev & 0x3f */
	int revcount = 0;
	int *revs;

	argcount = PySequence_Length(args);
	revs = PyMem_Malloc(argcount * sizeof(*revs));
	if (argcount > 0 && revs == NULL)
		return PyErr_NoMemory();
	len = index_length(self);

	/* collect, validate, and de-duplicate the input revisions */
	for (i = 0; i < argcount; i++) {
		static const int capacity = 24;
		PyObject *obj = PySequence_GetItem(args, i);
		bitmask x;
		long val;

		if (!PyInt_Check(obj)) {
			PyErr_SetString(PyExc_TypeError,
			                "arguments must all be ints");
			Py_DECREF(obj);
			goto bail;
		}
		val = PyInt_AsLong(obj);
		Py_DECREF(obj);
		if (val == -1) {
			/* nullrev in the input: the answer is empty */
			ret = PyList_New(0);
			goto done;
		}
		if (val < 0 || val >= len) {
			PyErr_SetString(PyExc_IndexError, "index out of range");
			goto bail;
		}
		/* this cheesy bloom filter lets us avoid some more
		 * expensive duplicate checks in the common set-is-disjoint
		 * case */
		x = 1ull << (val & 0x3f);
		if (repeat & x) {
			int k;
			for (k = 0; k < revcount; k++) {
				if (val == revs[k])
					goto duplicate;
			}
		} else
			repeat |= x;
		if (revcount >= capacity) {
			PyErr_Format(PyExc_OverflowError,
			             "bitset size (%d) > capacity (%d)",
			             revcount, capacity);
			goto bail;
		}
		revs[revcount++] = (int)val;
	duplicate:;
	}

	/* trivial cases: zero or one distinct rev */
	if (revcount == 0) {
		ret = PyList_New(0);
		goto done;
	}
	if (revcount == 1) {
		PyObject *obj;
		ret = PyList_New(1);
		if (ret == NULL)
			goto bail;
		obj = PyInt_FromLong(revs[0]);
		if (obj == NULL)
			goto bail;
		PyList_SET_ITEM(ret, 0, obj);
		goto done;
	}

	ret = find_gca_candidates(self, revs, revcount);
	if (ret == NULL)
		goto bail;

done:
	PyMem_Free(revs);
	return ret;

bail:
	PyMem_Free(revs);
	Py_XDECREF(ret);
	return NULL;
}
2404 2404
2405 2405 /*
2406 2406 * Given a (possibly overlapping) set of revs, return the greatest
2407 2407 * common ancestors: those with the longest path to the root.
2408 2408 */
2409 2409 static PyObject *index_ancestors(indexObject *self, PyObject *args)
2410 2410 {
2411 2411 PyObject *ret;
2412 2412 PyObject *gca = index_commonancestorsheads(self, args);
2413 2413 if (gca == NULL)
2414 2414 return NULL;
2415 2415
2416 2416 if (PyList_GET_SIZE(gca) <= 1) {
2417 2417 return gca;
2418 2418 }
2419 2419
2420 2420 ret = find_deepest(self, gca);
2421 2421 Py_DECREF(gca);
2422 2422 return ret;
2423 2423 }
2424 2424
/*
 * Invalidate any trie entries introduced by added revs.
 */
static void index_invalidate_added(indexObject *self, Py_ssize_t start)
{
	Py_ssize_t i, len;

	len = self->length + self->new_length;
	i = start - self->length;
	if (i < 0)
		return; /* start predates the added region: nothing to drop */

	/* remove each added rev's node from the trie; the node hash is
	 * read 32 bytes into the record (v1 index layout -- confirm) */
	for (i = start; i < len; i++)
		nt_delete_node(&self->nt, index_deref(self, i) + 32);

	self->new_length = start - self->length;
}
2442 2442
/*
 * Delete a numeric range of revs, which must be at the end of the
 * range.
 */
static int index_slice_del(indexObject *self, PyObject *item)
{
	Py_ssize_t start, stop, step, slicelength;
	Py_ssize_t length = index_length(self) + 1;
	int ret = 0;

/* Argument changed from PySliceObject* to PyObject* in Python 3. */
#ifdef IS_PY3K
	if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
	                         &slicelength) < 0)
#else
	if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
	                         &step, &slicelength) < 0)
#endif
		return -1;

	if (slicelength <= 0)
		return 0;

	if ((step < 0 && start < stop) || (step > 0 && start > stop))
		stop = start;

	/* normalize a negative-step slice into an ascending one */
	if (step < 0) {
		stop = start + 1;
		start = stop + step * (slicelength - 1) - 1;
		step = -step;
	}

	if (step != 1) {
		PyErr_SetString(PyExc_ValueError,
		                "revlog index delete requires step size of 1");
		return -1;
	}

	/* only truncation from the tail is supported */
	if (stop != length - 1) {
		PyErr_SetString(PyExc_IndexError,
		                "revlog index deletion indices are invalid");
		return -1;
	}

	if (start < self->length) {
		/* the truncation reaches into the on-disk portion: purge
		 * all affected node->rev trie entries first */
		if (self->ntinitialized) {
			Py_ssize_t i;

			for (i = start; i < self->length; i++) {
				const char *node = index_node_existing(self, i);
				if (node == NULL)
					return -1;

				nt_delete_node(&self->nt, node);
			}
			if (self->new_length)
				index_invalidate_added(self, self->length);
			if (self->ntrev > start)
				self->ntrev = (int)start;
		} else if (self->new_length) {
			self->new_length = 0;
		}

		self->length = start;
		goto done;
	}

	/* the truncation only removes in-memory (added) entries */
	if (self->ntinitialized) {
		index_invalidate_added(self, start);
		if (self->ntrev > start)
			self->ntrev = (int)start;
	} else {
		self->new_length = start - self->length;
	}
done:
	/* cached head revisions are stale after any truncation */
	Py_CLEAR(self->headrevs);
	return ret;
}
2521 2521
/*
 * Supported ops:
 *
 * slice deletion
 * string assignment (extend node->rev mapping)
 * string deletion (shrink node->rev mapping)
 */
static int index_assign_subscript(indexObject *self, PyObject *item,
                                  PyObject *value)
{
	char *node;
	long rev;

	/* del index[i:j] -> truncate the index */
	if (PySlice_Check(item) && value == NULL)
		return index_slice_del(self, item);

	if (node_check(self->nodelen, item, &node) == -1)
		return -1;

	/* del index[node] -> drop the node from the lookup trie */
	if (value == NULL)
		return self->ntinitialized ? nt_delete_node(&self->nt, node)
		                           : 0;
	rev = PyInt_AsLong(value);
	if (rev > INT_MAX || rev < 0) {
		/* rev < 0 also catches the -1 that PyInt_AsLong returns
		 * on conversion failure */
		if (!PyErr_Occurred())
			PyErr_SetString(PyExc_ValueError, "rev out of range");
		return -1;
	}

	/* index[node] = rev -> (lazily create and) extend the trie */
	if (index_init_nt(self) == -1)
		return -1;
	return nt_insert(&self->nt, node, (int)rev);
}
2555 2555
/*
 * Find all RevlogNG entries in an index that has inline data. Update
 * the optional "offsets" table with those entries.
 */
static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
{
	const char *data = (const char *)self->buf.buf;
	Py_ssize_t pos = 0;
	Py_ssize_t end = self->buf.len;
	long incr = v1_hdrsize;
	Py_ssize_t len = 0; /* number of records found */

	/* the pos >= 0 test also stops on overflow from corrupt lengths */
	while (pos + v1_hdrsize <= end && pos >= 0) {
		uint32_t comp_len;
		/* 3rd element of header is length of compressed inline data */
		comp_len = getbe32(data + pos + 8);
		incr = v1_hdrsize + comp_len;
		if (offsets)
			offsets[len] = data + pos;
		len++;
		pos += incr;
	}

	/* a well-formed inline index consumes the buffer exactly */
	if (pos != end) {
		if (!PyErr_Occurred())
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
		return -1;
	}

	return len;
}
2587 2587
/* tp_init: parse (data, inlined) and set up the index over the buffer. */
static int index_init(indexObject *self, PyObject *args)
{
	PyObject *data_obj, *inlined_obj;
	Py_ssize_t size;

	/* Initialize before argument-checking to avoid index_dealloc() crash.
	 */
	self->added = NULL;
	self->new_length = 0;
	self->added_length = 0;
	self->data = NULL;
	memset(&self->buf, 0, sizeof(self->buf));
	self->headrevs = NULL;
	self->filteredrevs = Py_None;
	Py_INCREF(Py_None);
	self->ntinitialized = 0;
	self->offsets = NULL;
	self->nodelen = 20; /* binary node size (20 = SHA-1) */
	self->nullentry = NULL;

	if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
		return -1;
	if (!PyObject_CheckBuffer(data_obj)) {
		PyErr_SetString(PyExc_TypeError,
		                "data does not support buffer interface");
		return -1;
	}
	/* the Py_ssize_t cast silences a signed/unsigned comparison
	 * warning; sizeof(nullid) bounds the supported node size */
	if (self->nodelen < 20 || self->nodelen > (Py_ssize_t)sizeof(nullid)) {
		PyErr_SetString(PyExc_RuntimeError, "unsupported node size");
		return -1;
	}

	self->nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0,
	                                -1, -1, -1, -1, nullid, self->nodelen);
	if (!self->nullentry)
		return -1;
	/* the null entry lives for the index's lifetime; keep it out of GC */
	PyObject_GC_UnTrack(self->nullentry);

	if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
		return -1;
	size = self->buf.len;

	self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
	self->data = data_obj;

	self->ntlookups = self->ntmisses = 0;
	self->ntrev = -1;
	Py_INCREF(self->data);

	if (self->inlined) {
		/* inline data: record boundaries must be scanned */
		Py_ssize_t len = inline_scan(self, NULL);
		if (len == -1)
			goto bail;
		self->length = len;
	} else {
		/* separate data file: records are fixed-size */
		if (size % v1_hdrsize) {
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
			goto bail;
		}
		self->length = size / v1_hdrsize;
	}

	return 0;
bail:
	return -1;
}
2654 2654
/* `nodemap` getter: the index object doubles as its own nodemap. */
static PyObject *index_nodemap(indexObject *self)
{
	Py_INCREF(self);
	return (PyObject *)self;
}
2660 2660
/* Release all derived/cached state; the raw data buffer stays intact. */
static void _index_clearcaches(indexObject *self)
{
	if (self->offsets) {
		PyMem_Free((void *)self->offsets);
		self->offsets = NULL;
	}
	if (self->ntinitialized) {
		nt_dealloc(&self->nt);
	}
	self->ntinitialized = 0;
	Py_CLEAR(self->headrevs);
}
2673 2673
/* Python-visible cache reset: also forgets trie progress and stats. */
static PyObject *index_clearcaches(indexObject *self)
{
	_index_clearcaches(self);
	self->ntrev = -1;
	self->ntlookups = self->ntmisses = 0;
	Py_RETURN_NONE;
}
2681 2681
/* tp_dealloc: drop caches, release the buffer, and free owned refs. */
static void index_dealloc(indexObject *self)
{
	_index_clearcaches(self);
	Py_XDECREF(self->filteredrevs);
	if (self->buf.buf) {
		PyBuffer_Release(&self->buf);
		memset(&self->buf, 0, sizeof(self->buf));
	}
	Py_XDECREF(self->data);
	PyMem_Free(self->added);
	Py_XDECREF(self->nullentry);
	PyObject_Del(self);
}
2695 2695
/* sequence protocol: len(index), index[rev], `x in index` */
static PySequenceMethods index_sequence_methods = {
	(lenfunc)index_length,      /* sq_length */
	0,                          /* sq_concat */
	0,                          /* sq_repeat */
	(ssizeargfunc)index_get,    /* sq_item */
	0,                          /* sq_slice */
	0,                          /* sq_ass_item */
	0,                          /* sq_ass_slice */
	(objobjproc)index_contains, /* sq_contains */
};
2706 2706
/* mapping protocol: index[node], index[node] = rev, del index[...] */
static PyMappingMethods index_mapping_methods = {
	(lenfunc)index_length,                 /* mp_length */
	(binaryfunc)index_getitem,             /* mp_subscript */
	(objobjargproc)index_assign_subscript, /* mp_ass_subscript */
};
2712 2712
/* methods exposed on parsers.index instances */
static PyMethodDef index_methods[] = {
    {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
     "return the gca set of the given revs"},
    {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
     METH_VARARGS,
     "return the heads of the common ancestors of the given revs"},
    {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
     "clear the index caches"},
    {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
    {"get_rev", (PyCFunction)index_m_get, METH_VARARGS,
     "return `rev` associated with a node or None"},
    {"has_node", (PyCFunction)index_m_has_node, METH_O,
     "return True if the node exist in the index"},
    {"rev", (PyCFunction)index_m_rev, METH_O,
     "return `rev` associated with a node or raise RevlogError"},
    {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
     "compute phases"},
    {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
     "reachableroots"},
    {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
     "get head revisions"}, /* Can do filtering since 3.2 */
    {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
     "get filtered head revisions"}, /* Can always do filtering */
    {"issnapshot", (PyCFunction)index_issnapshot, METH_O,
     "True if the object is a snapshot"},
    {"findsnapshots", (PyCFunction)index_findsnapshots, METH_VARARGS,
     "Gather snapshot data in a cache dict"},
    {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
     "determine revisions with deltas to reconstruct fulltext"},
    {"slicechunktodensity", (PyCFunction)index_slicechunktodensity,
     METH_VARARGS, "determine revisions with deltas to reconstruct fulltext"},
    {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
    {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
     "match a potentially ambiguous node ID"},
    {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
     "find length of shortest hex nodeid of a binary ID"},
    {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
    {NULL} /* Sentinel */
};
2752 2752
/* attribute descriptors: `index.nodemap` (read-only) */
static PyGetSetDef index_getset[] = {
    {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
    {NULL} /* Sentinel */
};
2757 2757
/* the parsers.index type: a lazily-parsed RevlogNG index */
PyTypeObject HgRevlogIndex_Type = {
    PyVarObject_HEAD_INIT(NULL, 0) /* header */
    "parsers.index",               /* tp_name */
    sizeof(indexObject),           /* tp_basicsize */
    0,                             /* tp_itemsize */
    (destructor)index_dealloc,     /* tp_dealloc */
    0,                             /* tp_print */
    0,                             /* tp_getattr */
    0,                             /* tp_setattr */
    0,                             /* tp_compare */
    0,                             /* tp_repr */
    0,                             /* tp_as_number */
    &index_sequence_methods,       /* tp_as_sequence */
    &index_mapping_methods,        /* tp_as_mapping */
    0,                             /* tp_hash */
    0,                             /* tp_call */
    0,                             /* tp_str */
    0,                             /* tp_getattro */
    0,                             /* tp_setattro */
    0,                             /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,            /* tp_flags */
    "revlog index",                /* tp_doc */
    0,                             /* tp_traverse */
    0,                             /* tp_clear */
    0,                             /* tp_richcompare */
    0,                             /* tp_weaklistoffset */
    0,                             /* tp_iter */
    0,                             /* tp_iternext */
    index_methods,                 /* tp_methods */
    0,                             /* tp_members */
    index_getset,                  /* tp_getset */
    0,                             /* tp_base */
    0,                             /* tp_dict */
    0,                             /* tp_descr_get */
    0,                             /* tp_descr_set */
    0,                             /* tp_dictoffset */
    (initproc)index_init,          /* tp_init */
    0,                             /* tp_alloc */
};
2797 2797
/*
 * returns a tuple of the form (index, index, cache) with elements as
 * follows:
 *
 * index: an index object that lazily parses RevlogNG records
 * cache: if data is inlined, a tuple (0, index_file_content), else None
 *  index_file_content could be a string, or a buffer
 *
 * added complications are for backwards compatibility
 */
PyObject *parse_index2(PyObject *self, PyObject *args)
{
	PyObject *cache = NULL;
	indexObject *idx;
	int ret;

	idx = PyObject_New(indexObject, &HgRevlogIndex_Type);
	if (idx == NULL)
		goto bail;

	/* forward args (data, inlined) straight to tp_init */
	ret = index_init(idx, args);
	if (ret == -1)
		goto bail;

	if (idx->inlined) {
		cache = Py_BuildValue("iO", 0, idx->data);
		if (cache == NULL)
			goto bail;
	} else {
		cache = Py_None;
		Py_INCREF(cache);
	}

	/* "NN": the tuple steals both references */
	return Py_BuildValue("NN", idx, cache);

bail:
	Py_XDECREF(idx);
	Py_XDECREF(cache);
	return NULL;
}
2838 2838
/* C API vtable handed to sibling extensions via a PyCapsule */
static Revlog_CAPI CAPI = {
    /* increment the abi_version field upon each change in the Revlog_CAPI
       struct or in the ABI of the listed functions */
    2,
    index_length,
    index_node,
    HgRevlogIndex_GetParents,
};
2847 2847
/* Register the index and nodetree types plus the C API capsule on mod. */
void revlog_module_init(PyObject *mod)
{
	PyObject *caps = NULL;
	HgRevlogIndex_Type.tp_new = PyType_GenericNew;
	if (PyType_Ready(&HgRevlogIndex_Type) < 0)
		return;
	Py_INCREF(&HgRevlogIndex_Type);
	PyModule_AddObject(mod, "index", (PyObject *)&HgRevlogIndex_Type);

	nodetreeType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&nodetreeType) < 0)
		return;
	Py_INCREF(&nodetreeType);
	PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);

	/* expose the CAPI vtable so other C extensions can call into us */
	caps = PyCapsule_New(&CAPI, "mercurial.cext.parsers.revlog_CAPI", NULL);
	if (caps != NULL)
		PyModule_AddObject(mod, "revlog_CAPI", caps);
}
General Comments 0
You need to be logged in to leave comments. Login now