##// END OF EJS Templates
revlog: fix out-of-bounds access by negative parents read from revlog (SEC)...
Yuya Nishihara -
r40848:9cdd525d stable
parent child Browse files
Show More
@@ -1,2488 +1,2494 b''
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #include <Python.h>
11 11 #include <assert.h>
12 12 #include <ctype.h>
13 13 #include <stddef.h>
14 14 #include <string.h>
15 15
16 16 #include "bitmanipulation.h"
17 17 #include "charencode.h"
18 18 #include "util.h"
19 19
#ifdef IS_PY3K
/* The mapping of Python types is meant to be temporary to get Python
 * 3 to compile. We should remove this once Python 3 support is fully
 * supported and proper types are used in the extensions themselves. */
/* On Python 3 the PyInt_* names used throughout this file alias the
 * PyLong_* APIs; on Python 2 the real PyInt_* APIs are used as-is. */
#define PyInt_Check PyLong_Check
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsLong PyLong_AsLong
#endif
30 30
/* Forward declaration; full definition below. */
typedef struct indexObjectStruct indexObject;

/* One trie node: sixteen child slots, one per hex nybble. */
typedef struct {
	int children[16];
} nodetreenode;

/*
 * A base-16 trie for fast node->rev mapping.
 *
 * Positive value is index of the next node in the trie
 * Negative value is a leaf: -(rev + 2)
 * Zero is empty
 */
typedef struct {
	indexObject *index;   /* index the trie maps into (borrowed) */
	nodetreenode *nodes;  /* growable array of trie nodes */
	unsigned length;      /* # nodes in use */
	unsigned capacity;    /* # nodes allocated */
	int depth;            /* maximum depth of tree */
	int splits;           /* # splits performed */
} nodetree;

/* Python-level wrapper object exposing a standalone nodetree. */
typedef struct {
	PyObject_HEAD
	nodetree nt;
} nodetreeObject;

/*
 * This class has two behaviors.
 *
 * When used in a list-like way (with integer keys), we decode an
 * entry in a RevlogNG index file on demand. Our last entry is a
 * sentinel, always a nullid. We have limited support for
 * integer-keyed insert and delete, only at elements right before the
 * sentinel.
 *
 * With string keys, we lazily perform a reverse mapping from node to
 * rev, using a base-16 trie.
 */
struct indexObjectStruct {
	PyObject_HEAD
	/* Type-specific fields go here. */
	PyObject *data;        /* raw bytes of index */
	Py_buffer buf;         /* buffer of data */
	PyObject **cache;      /* cached tuples */
	const char **offsets;  /* populated on demand */
	Py_ssize_t raw_length; /* original number of elements */
	Py_ssize_t length;     /* current number of elements */
	PyObject *added;       /* populated on demand */
	PyObject *headrevs;    /* cache, invalidated on changes */
	PyObject *filteredrevs;/* filtered revs set */
	nodetree nt;           /* base-16 trie */
	int ntinitialized;     /* 0 or 1 */
	int ntrev;             /* last rev scanned */
	int ntlookups;         /* # lookups */
	int ntmisses;          /* # lookups that miss the cache */
	int inlined;           /* nonzero: records have variable offsets
	                        * (resolved lazily, see index_deref) */
};
89 89
90 90 static Py_ssize_t index_length(const indexObject *self)
91 91 {
92 92 if (self->added == NULL)
93 93 return self->length;
94 94 return self->length + PyList_GET_SIZE(self->added);
95 95 }
96 96
/* Shared tuple for the null revision; presumably created during module
 * init (not visible in this chunk) — confirm before relying on it. */
static PyObject *nullentry = NULL;
/* 20 zero bytes: the node hash of the null revision. */
static const char nullid[20] = {0};

static Py_ssize_t inline_scan(indexObject *self, const char **offsets);

/* Py_BuildValue format for an index entry tuple; the offset/flags field
 * needs a "K"/"k" wide enough for 64 bits, chosen by the size of long. */
#if LONG_MAX == 0x7fffffffL
static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
#else
static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
#endif

/* A RevlogNG v1 index entry is 64 bytes long. */
static const long v1_hdrsize = 64;
110 110
/*
 * Set mercurial.error.RevlogError as the current Python exception.
 * Falls back to SystemError if the error class cannot be found.
 */
static void raise_revlog_error(void)
{
	PyObject *mod = NULL, *dict = NULL, *errclass = NULL;

	mod = PyImport_ImportModule("mercurial.error");
	if (mod == NULL) {
		goto cleanup;
	}

	dict = PyModule_GetDict(mod);
	if (dict == NULL) {
		goto cleanup;
	}
	/* PyModule_GetDict returns a borrowed reference; take our own so
	 * the cleanup path can decref unconditionally. */
	Py_INCREF(dict);

	errclass = PyDict_GetItemString(dict, "RevlogError");
	if (errclass == NULL) {
		PyErr_SetString(PyExc_SystemError,
				"could not find RevlogError");
		goto cleanup;
	}

	/* value of exception is ignored by callers */
	PyErr_SetString(errclass, "RevlogError");

cleanup:
	Py_XDECREF(dict);
	Py_XDECREF(mod);
}
140 140
/*
 * Return a pointer to the beginning of a RevlogNG record.
 * May return NULL with a MemoryError set when the lazy offset table
 * cannot be allocated for inlined data.
 */
static const char *index_deref(indexObject *self, Py_ssize_t pos)
{
	if (self->inlined && pos > 0) {
		/* Inlined data interleaves records with variable-size
		 * payloads, so per-entry offsets are computed once by
		 * inline_scan and cached. */
		if (self->offsets == NULL) {
			self->offsets = PyMem_Malloc(self->raw_length *
					             sizeof(*self->offsets));
			if (self->offsets == NULL)
				return (const char *)PyErr_NoMemory();
			/* NOTE(review): inline_scan's return value is not
			 * checked here — presumably it cannot fail on an
			 * already-validated buffer; confirm against its
			 * definition (outside this chunk). */
			inline_scan(self, self->offsets);
		}
		return self->offsets[pos];
	}

	/* Non-inlined: fixed 64-byte records, direct arithmetic. */
	return (const char *)(self->buf.buf) + pos * v1_hdrsize;
}
159 159
/*
 * Get parents of the given rev.
 *
 * The specified rev must be valid and must not be nullrev. A returned
 * parent revision may be nullrev, but is guaranteed to be in valid range.
 */
static inline int index_get_parents(indexObject *self, Py_ssize_t rev,
				    int *ps, int maxrev)
{
	if (rev >= self->length) {
		/* rev lives in the in-memory "added" list; parents are
		 * tuple items 5 and 6. */
		PyObject *tuple = PyList_GET_ITEM(self->added, rev - self->length);
		ps[0] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 5));
		ps[1] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 6));
	} else {
		const char *data = index_deref(self, rev);
		ps[0] = getbe32(data + 24);
		ps[1] = getbe32(data + 28);
	}
	/* If index file is corrupted, ps[] may point to invalid revisions. So
	 * there is a risk of buffer overflow to trust them unconditionally.
	 * Rejecting anything below nullrev (-1) closes the out-of-bounds
	 * access through negative parents that this SEC fix addresses:
	 * callers index arrays with parents[i] + 1. */
	if (ps[0] < -1 || ps[0] > maxrev || ps[1] < -1 || ps[1] > maxrev) {
		PyErr_SetString(PyExc_ValueError, "parent out of range");
		return -1;
	}
	return 0;
}
180 186
181 187
182 188 /*
183 189 * RevlogNG format (all in big endian, data may be inlined):
184 190 * 6 bytes: offset
185 191 * 2 bytes: flags
186 192 * 4 bytes: compressed length
187 193 * 4 bytes: uncompressed length
188 194 * 4 bytes: base revision
189 195 * 4 bytes: link revision
190 196 * 4 bytes: parent 1 revision
191 197 * 4 bytes: parent 2 revision
192 198 * 32 bytes: nodeid (only 20 bytes used)
193 199 */
194 200 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
195 201 {
196 202 uint64_t offset_flags;
197 203 int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
198 204 const char *c_node_id;
199 205 const char *data;
200 206 Py_ssize_t length = index_length(self);
201 207 PyObject *entry;
202 208
203 209 if (pos == -1) {
204 210 Py_INCREF(nullentry);
205 211 return nullentry;
206 212 }
207 213
208 214 if (pos < 0 || pos >= length) {
209 215 PyErr_SetString(PyExc_IndexError, "revlog index out of range");
210 216 return NULL;
211 217 }
212 218
213 219 if (pos >= self->length) {
214 220 PyObject *obj;
215 221 obj = PyList_GET_ITEM(self->added, pos - self->length);
216 222 Py_INCREF(obj);
217 223 return obj;
218 224 }
219 225
220 226 if (self->cache) {
221 227 if (self->cache[pos]) {
222 228 Py_INCREF(self->cache[pos]);
223 229 return self->cache[pos];
224 230 }
225 231 } else {
226 232 self->cache = calloc(self->raw_length, sizeof(PyObject *));
227 233 if (self->cache == NULL)
228 234 return PyErr_NoMemory();
229 235 }
230 236
231 237 data = index_deref(self, pos);
232 238 if (data == NULL)
233 239 return NULL;
234 240
235 241 offset_flags = getbe32(data + 4);
236 242 if (pos == 0) /* mask out version number for the first entry */
237 243 offset_flags &= 0xFFFF;
238 244 else {
239 245 uint32_t offset_high = getbe32(data);
240 246 offset_flags |= ((uint64_t)offset_high) << 32;
241 247 }
242 248
243 249 comp_len = getbe32(data + 8);
244 250 uncomp_len = getbe32(data + 12);
245 251 base_rev = getbe32(data + 16);
246 252 link_rev = getbe32(data + 20);
247 253 parent_1 = getbe32(data + 24);
248 254 parent_2 = getbe32(data + 28);
249 255 c_node_id = data + 32;
250 256
251 257 entry = Py_BuildValue(tuple_format, offset_flags, comp_len,
252 258 uncomp_len, base_rev, link_rev,
253 259 parent_1, parent_2, c_node_id, 20);
254 260
255 261 if (entry) {
256 262 PyObject_GC_UnTrack(entry);
257 263 Py_INCREF(entry);
258 264 }
259 265
260 266 self->cache[pos] = entry;
261 267
262 268 return entry;
263 269 }
264 270
/*
 * Return the 20-byte SHA of the node corresponding to the given rev.
 * Returns NULL when pos is out of range or the data is inaccessible.
 */
static const char *index_node(indexObject *self, Py_ssize_t pos)
{
	Py_ssize_t length = index_length(self);
	const char *data;

	if (pos == -1)
		return nullid;

	if (pos >= length)
		return NULL;

	/* In-memory entries keep the node as tuple item 7. */
	if (pos >= self->length) {
		PyObject *tuple, *str;
		tuple = PyList_GET_ITEM(self->added, pos - self->length);
		str = PyTuple_GetItem(tuple, 7);
		return str ? PyBytes_AS_STRING(str) : NULL;
	}

	/* On-disk entries keep the node at byte offset 32. */
	data = index_deref(self, pos);
	return data ? data + 32 : NULL;
}
289 295
290 296 /*
291 297 * Return the 20-byte SHA of the node corresponding to the given rev. The
292 298 * rev is assumed to be existing. If not, an exception is set.
293 299 */
294 300 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
295 301 {
296 302 const char *node = index_node(self, pos);
297 303 if (node == NULL) {
298 304 PyErr_Format(PyExc_IndexError, "could not access rev %d",
299 305 (int)pos);
300 306 }
301 307 return node;
302 308 }
303 309
304 310 static int nt_insert(nodetree *self, const char *node, int rev);
305 311
306 312 static int node_check(PyObject *obj, char **node)
307 313 {
308 314 Py_ssize_t nodelen;
309 315 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
310 316 return -1;
311 317 if (nodelen == 20)
312 318 return 0;
313 319 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
314 320 return -1;
315 321 }
316 322
/*
 * Append a new 8-tuple entry to the in-memory part of the index.
 * Item 7 of the tuple must be the 20-byte node; it is also registered
 * in the node->rev trie when that trie has been initialized.
 * Returns None, or NULL with an exception set.
 */
static PyObject *index_append(indexObject *self, PyObject *obj)
{
	char *node;
	Py_ssize_t len;

	if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
		PyErr_SetString(PyExc_TypeError, "8-tuple required");
		return NULL;
	}

	if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
		return NULL;

	/* The new entry's revision number is the current length. */
	len = index_length(self);

	if (self->added == NULL) {
		self->added = PyList_New(0);
		if (self->added == NULL)
			return NULL;
	}

	if (PyList_Append(self->added, obj) == -1)
		return NULL;

	if (self->ntinitialized)
		nt_insert(&self->nt, node, (int)len);

	/* Cached head computation is invalid after any mutation. */
	Py_CLEAR(self->headrevs);
	Py_RETURN_NONE;
}
347 353
/*
 * Build a dict of diagnostic counters about the index and its node
 * trie. Returns a new reference, or NULL with an exception set.
 */
static PyObject *index_stats(indexObject *self)
{
	PyObject *obj = PyDict_New();
	PyObject *t = NULL;

	if (obj == NULL)
		return NULL;

	/* Store self-><field> under key <desc>; jumps to bail on error,
	 * where a non-NULL t is released by Py_XDECREF. */
#define istat(__n, __d) \
	do { \
		t = PyInt_FromSsize_t(self->__n); \
		if (!t) \
			goto bail; \
		if (PyDict_SetItemString(obj, __d, t) == -1) \
			goto bail; \
		Py_DECREF(t); \
	} while (0)

	if (self->added) {
		Py_ssize_t len = PyList_GET_SIZE(self->added);
		t = PyInt_FromSsize_t(len);
		if (!t)
			goto bail;
		if (PyDict_SetItemString(obj, "index entries added", t) == -1)
			goto bail;
		Py_DECREF(t);
	}

	if (self->raw_length != self->length)
		istat(raw_length, "revs on disk");
	istat(length, "revs in memory");
	istat(ntlookups, "node trie lookups");
	istat(ntmisses, "node trie misses");
	istat(ntrev, "node trie last rev scanned");
	if (self->ntinitialized) {
		istat(nt.capacity, "node trie capacity");
		istat(nt.depth, "node trie depth");
		istat(nt.length, "node trie count");
		istat(nt.splits, "node trie splits");
	}

#undef istat

	return obj;

bail:
	Py_XDECREF(obj);
	Py_XDECREF(t);
	return NULL;
}
398 404
399 405 /*
400 406 * When we cache a list, we want to be sure the caller can't mutate
401 407 * the cached copy.
402 408 */
403 409 static PyObject *list_copy(PyObject *list)
404 410 {
405 411 Py_ssize_t len = PyList_GET_SIZE(list);
406 412 PyObject *newlist = PyList_New(len);
407 413 Py_ssize_t i;
408 414
409 415 if (newlist == NULL)
410 416 return NULL;
411 417
412 418 for (i = 0; i < len; i++) {
413 419 PyObject *obj = PyList_GET_ITEM(list, i);
414 420 Py_INCREF(obj);
415 421 PyList_SET_ITEM(newlist, i, obj);
416 422 }
417 423
418 424 return newlist;
419 425 }
420 426
421 427 static int check_filter(PyObject *filter, Py_ssize_t arg)
422 428 {
423 429 if (filter) {
424 430 PyObject *arglist, *result;
425 431 int isfiltered;
426 432
427 433 arglist = Py_BuildValue("(n)", arg);
428 434 if (!arglist) {
429 435 return -1;
430 436 }
431 437
432 438 result = PyEval_CallObject(filter, arglist);
433 439 Py_DECREF(arglist);
434 440 if (!result) {
435 441 return -1;
436 442 }
437 443
438 444 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
439 445 * same as this function, so we can just return it directly.*/
440 446 isfiltered = PyObject_IsTrue(result);
441 447 Py_DECREF(result);
442 448 return isfiltered;
443 449 } else {
444 450 return 0;
445 451 }
446 452 }
447 453
448 454 static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
449 455 Py_ssize_t marker, char *phases)
450 456 {
451 457 PyObject *iter = NULL;
452 458 PyObject *iter_item = NULL;
453 459 Py_ssize_t min_idx = index_length(self) + 2;
454 460 long iter_item_long;
455 461
456 462 if (PyList_GET_SIZE(list) != 0) {
457 463 iter = PyObject_GetIter(list);
458 464 if (iter == NULL)
459 465 return -2;
460 466 while ((iter_item = PyIter_Next(iter))) {
461 467 iter_item_long = PyInt_AS_LONG(iter_item);
462 468 Py_DECREF(iter_item);
463 469 if (iter_item_long < min_idx)
464 470 min_idx = iter_item_long;
465 471 phases[iter_item_long] = (char)marker;
466 472 }
467 473 Py_DECREF(iter);
468 474 }
469 475
470 476 return min_idx;
471 477 }
472 478
473 479 static inline void set_phase_from_parents(char *phases, int parent_1,
474 480 int parent_2, Py_ssize_t i)
475 481 {
476 482 if (parent_1 >= 0 && phases[parent_1] > phases[i])
477 483 phases[i] = phases[parent_1];
478 484 if (parent_2 >= 0 && phases[parent_2] > phases[i])
479 485 phases[i] = phases[parent_2];
480 486 }
481 487
/*
 * reachableroots2(minroot, heads, roots, includepath) -> list of revs
 *
 * Walk ancestors of `heads` (never descending below `minroot`) and
 * collect the members of `roots` that are reached. With
 * includepath=True, every rev lying on a path between a reachable
 * root and a head is added as well.
 */
static PyObject *reachableroots2(indexObject *self, PyObject *args)
{

	/* Input */
	long minroot;
	PyObject *includepatharg = NULL;
	int includepath = 0;
	/* heads and roots are lists */
	PyObject *heads = NULL;
	PyObject *roots = NULL;
	PyObject *reachable = NULL;

	PyObject *val;
	Py_ssize_t len = index_length(self);
	long revnum;
	Py_ssize_t k;
	Py_ssize_t i;
	Py_ssize_t l;
	int r;
	int parents[2];

	/* Internal data structure:
	 * tovisit: array of length len+1 (all revs + nullrev), filled upto lentovisit
	 * revstates: array of length len+1 (all revs + nullrev)
	 * Indices into revstates are shifted by one so nullrev (-1) lands
	 * on slot 0. */
	int *tovisit = NULL;
	long lentovisit = 0;
	enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
	char *revstates = NULL;

	/* Get arguments */
	if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
			      &PyList_Type, &roots,
			      &PyBool_Type, &includepatharg))
		goto bail;

	if (includepatharg == Py_True)
		includepath = 1;

	/* Initialize return set */
	reachable = PyList_New(0);
	if (reachable == NULL)
		goto bail;

	/* Initialize internal datastructures */
	tovisit = (int *)malloc((len + 1) * sizeof(int));
	if (tovisit == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	revstates = (char *)calloc(len + 1, 1);
	if (revstates == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* Mark the requested roots. */
	l = PyList_GET_SIZE(roots);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		/* If root is out of range, e.g. wdir(), it must be unreachable
		 * from heads. So we can just ignore it. */
		if (revnum + 1 < 0 || revnum + 1 >= len + 1)
			continue;
		revstates[revnum + 1] |= RS_ROOT;
	}

	/* Populate tovisit with all the heads */
	l = PyList_GET_SIZE(heads);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
			PyErr_SetString(PyExc_IndexError, "head out of range");
			goto bail;
		}
		if (!(revstates[revnum + 1] & RS_SEEN)) {
			tovisit[lentovisit++] = (int)revnum;
			revstates[revnum + 1] |= RS_SEEN;
		}
	}

	/* Visit the tovisit list and find the reachable roots */
	k = 0;
	while (k < lentovisit) {
		/* Add the node to reachable if it is a root*/
		revnum = tovisit[k++];
		if (revstates[revnum + 1] & RS_ROOT) {
			revstates[revnum + 1] |= RS_REACHABLE;
			val = PyInt_FromLong(revnum);
			if (val == NULL)
				goto bail;
			r = PyList_Append(reachable, val);
			Py_DECREF(val);
			if (r < 0)
				goto bail;
			/* Without includepath we can stop at the first
			 * root on each path. */
			if (includepath == 0)
				continue;
		}

		/* Add its parents to the list of nodes to visit */
		if (revnum == -1)
			continue;
		r = index_get_parents(self, revnum, parents, (int)len - 1);
		if (r < 0)
			goto bail;
		for (i = 0; i < 2; i++) {
			if (!(revstates[parents[i] + 1] & RS_SEEN)
			    && parents[i] >= minroot) {
				tovisit[lentovisit++] = parents[i];
				revstates[parents[i] + 1] |= RS_SEEN;
			}
		}
	}

	/* Find all the nodes in between the roots we found and the heads
	 * and add them to the reachable set */
	if (includepath == 1) {
		long minidx = minroot;
		if (minidx < 0)
			minidx = 0;
		/* Forward pass: a seen rev with a reachable parent is
		 * itself on a root->head path. */
		for (i = minidx; i < len; i++) {
			if (!(revstates[i + 1] & RS_SEEN))
				continue;
			r = index_get_parents(self, i, parents, (int)len - 1);
			/* Corrupted index file, error is set from
			 * index_get_parents */
			if (r < 0)
				goto bail;
			if (((revstates[parents[0] + 1] |
			      revstates[parents[1] + 1]) & RS_REACHABLE)
			    && !(revstates[i + 1] & RS_REACHABLE)) {
				revstates[i + 1] |= RS_REACHABLE;
				val = PyInt_FromSsize_t(i);
				if (val == NULL)
					goto bail;
				r = PyList_Append(reachable, val);
				Py_DECREF(val);
				if (r < 0)
					goto bail;
			}
		}
	}

	free(revstates);
	free(tovisit);
	return reachable;
bail:
	Py_XDECREF(reachable);
	free(revstates);
	free(tovisit);
	return NULL;
}
637 643
/*
 * compute_phases_map_sets(roots) -> (len, [None, set, set, ...])
 *
 * `roots` is a list of lists: the root revisions of each non-public
 * phase. The phase of every revision is computed by seeding the roots
 * and propagating phases from parents to children, then returned as
 * the index length plus a per-phase list of revision sets. Slot 0
 * (public) is None: public revs are derived by difference.
 */
static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
{
	PyObject *roots = Py_None;
	PyObject *ret = NULL;
	PyObject *phasessize = NULL;
	PyObject *phaseroots = NULL;
	PyObject *phaseset = NULL;
	PyObject *phasessetlist = NULL;
	PyObject *rev = NULL;
	Py_ssize_t len = index_length(self);
	Py_ssize_t numphase = 0;
	Py_ssize_t minrevallphases = 0;
	Py_ssize_t minrevphase = 0;
	Py_ssize_t i = 0;
	char *phases = NULL;
	long phase;

	if (!PyArg_ParseTuple(args, "O", &roots))
		goto done;
	if (roots == NULL || !PyList_Check(roots)) {
		PyErr_SetString(PyExc_TypeError, "roots must be a list");
		goto done;
	}

	phases = calloc(len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
	if (phases == NULL) {
		PyErr_NoMemory();
		goto done;
	}
	/* Put the phase information of all the roots in phases */
	numphase = PyList_GET_SIZE(roots)+1;
	minrevallphases = len + 1;
	phasessetlist = PyList_New(numphase);
	if (phasessetlist == NULL)
		goto done;

	/* Slot 0 stands for the public phase and stays None. */
	PyList_SET_ITEM(phasessetlist, 0, Py_None);
	Py_INCREF(Py_None);

	for (i = 0; i < numphase-1; i++) {
		phaseroots = PyList_GET_ITEM(roots, i);
		phaseset = PySet_New(NULL);
		if (phaseset == NULL)
			goto release;
		PyList_SET_ITEM(phasessetlist, i+1, phaseset);
		if (!PyList_Check(phaseroots)) {
			PyErr_SetString(PyExc_TypeError,
					"roots item must be a list");
			goto release;
		}
		minrevphase = add_roots_get_min(self, phaseroots, i+1, phases);
		if (minrevphase == -2) /* Error from add_roots_get_min */
			goto release;
		minrevallphases = MIN(minrevallphases, minrevphase);
	}
	/* Propagate the phase information from the roots to the revs */
	if (minrevallphases != -1) {
		int parents[2];
		for (i = minrevallphases; i < len; i++) {
			if (index_get_parents(self, i, parents,
					      (int)len - 1) < 0)
				goto release;
			set_phase_from_parents(phases, parents[0], parents[1], i);
		}
	}
	/* Transform phase list to a python list */
	phasessize = PyInt_FromSsize_t(len);
	if (phasessize == NULL)
		goto release;
	for (i = 0; i < len; i++) {
		phase = phases[i];
		/* We only store the sets of phase for non public phase, the public phase
		 * is computed as a difference */
		if (phase != 0) {
			phaseset = PyList_GET_ITEM(phasessetlist, phase);
			rev = PyInt_FromSsize_t(i);
			if (rev == NULL)
				goto release;
			PySet_Add(phaseset, rev);
			Py_XDECREF(rev);
		}
	}
	ret = PyTuple_Pack(2, phasessize, phasessetlist);

release:
	Py_XDECREF(phasessize);
	Py_XDECREF(phasessetlist);
done:
	free(phases);
	return ret;
}
729 735
/*
 * index_headrevs([filteredrevs]) -> list of head revisions
 *
 * A head is a revision with no unfiltered children. The result is
 * cached on the index (keyed on the filteredrevs object, invalidated
 * by index_append) and callers receive a copy of the cached list.
 */
static PyObject *index_headrevs(indexObject *self, PyObject *args)
{
	Py_ssize_t i, j, len;
	char *nothead = NULL;
	PyObject *heads = NULL;
	PyObject *filter = NULL;
	PyObject *filteredrevs = Py_None;

	if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
		return NULL;
	}

	/* Cache hit: same filter object and cache not invalidated. */
	if (self->headrevs && filteredrevs == self->filteredrevs)
		return list_copy(self->headrevs);

	Py_DECREF(self->filteredrevs);
	self->filteredrevs = filteredrevs;
	Py_INCREF(filteredrevs);

	if (filteredrevs != Py_None) {
		filter = PyObject_GetAttrString(filteredrevs, "__contains__");
		if (!filter) {
			PyErr_SetString(PyExc_TypeError,
					"filteredrevs has no attribute __contains__");
			goto bail;
		}
	}

	len = index_length(self);
	heads = PyList_New(0);
	if (heads == NULL)
		goto bail;
	/* An empty repository's only head is the null revision. */
	if (len == 0) {
		PyObject *nullid = PyInt_FromLong(-1);
		if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
			Py_XDECREF(nullid);
			goto bail;
		}
		goto done;
	}

	nothead = calloc(len, 1);
	if (nothead == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* Walk tip -> root, marking the parents of every unfiltered rev. */
	for (i = len - 1; i >= 0; i--) {
		int isfiltered;
		int parents[2];

		/* If nothead[i] == 1, it means we've seen an unfiltered child of this
		 * node already, and therefore this node is not filtered. So we can skip
		 * the expensive check_filter step.
		 */
		if (nothead[i] != 1) {
			isfiltered = check_filter(filter, i);
			if (isfiltered == -1) {
				PyErr_SetString(PyExc_TypeError,
						"unable to check filter");
				goto bail;
			}

			if (isfiltered) {
				nothead[i] = 1;
				continue;
			}
		}

		if (index_get_parents(self, i, parents, (int)len - 1) < 0)
			goto bail;
		for (j = 0; j < 2; j++) {
			if (parents[j] >= 0)
				nothead[parents[j]] = 1;
		}
	}

	/* Every rev not marked above is a head. */
	for (i = 0; i < len; i++) {
		PyObject *head;

		if (nothead[i])
			continue;
		head = PyInt_FromSsize_t(i);
		if (head == NULL || PyList_Append(heads, head) == -1) {
			Py_XDECREF(head);
			goto bail;
		}
	}

done:
	self->headrevs = heads;
	Py_XDECREF(filter);
	free(nothead);
	return list_copy(self->headrevs);
bail:
	Py_XDECREF(filter);
	Py_XDECREF(heads);
	free(nothead);
	return NULL;
}
830 836
/**
 * Obtain the base revision index entry.
 *
 * Callers must ensure that rev >= 0 or illegal memory access may occur.
 *
 * Returns the base revision, or -2 on error (with the exception set
 * by index_deref).
 */
static inline int index_baserev(indexObject *self, int rev)
{
	const char *data;

	if (rev >= self->length) {
		/* In-memory entry: base revision is tuple item 3. */
		PyObject *tuple = PyList_GET_ITEM(self->added, rev - self->length);
		return (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 3));
	}
	else {
		data = index_deref(self, rev);
		if (data == NULL) {
			return -2;
		}

		/* On-disk entry: base revision is at byte offset 16. */
		return getbe32(data + 16);
	}
}
853 859
854 860 static PyObject *index_deltachain(indexObject *self, PyObject *args)
855 861 {
856 862 int rev, generaldelta;
857 863 PyObject *stoparg;
858 864 int stoprev, iterrev, baserev = -1;
859 865 int stopped;
860 866 PyObject *chain = NULL, *result = NULL;
861 867 const Py_ssize_t length = index_length(self);
862 868
863 869 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
864 870 return NULL;
865 871 }
866 872
867 873 if (PyInt_Check(stoparg)) {
868 874 stoprev = (int)PyInt_AsLong(stoparg);
869 875 if (stoprev == -1 && PyErr_Occurred()) {
870 876 return NULL;
871 877 }
872 878 }
873 879 else if (stoparg == Py_None) {
874 880 stoprev = -2;
875 881 }
876 882 else {
877 883 PyErr_SetString(PyExc_ValueError,
878 884 "stoprev must be integer or None");
879 885 return NULL;
880 886 }
881 887
882 888 if (rev < 0 || rev >= length) {
883 889 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
884 890 return NULL;
885 891 }
886 892
887 893 chain = PyList_New(0);
888 894 if (chain == NULL) {
889 895 return NULL;
890 896 }
891 897
892 898 baserev = index_baserev(self, rev);
893 899
894 900 /* This should never happen. */
895 901 if (baserev <= -2) {
896 902 /* Error should be set by index_deref() */
897 903 assert(PyErr_Occurred());
898 904 goto bail;
899 905 }
900 906
901 907 iterrev = rev;
902 908
903 909 while (iterrev != baserev && iterrev != stoprev) {
904 910 PyObject *value = PyInt_FromLong(iterrev);
905 911 if (value == NULL) {
906 912 goto bail;
907 913 }
908 914 if (PyList_Append(chain, value)) {
909 915 Py_DECREF(value);
910 916 goto bail;
911 917 }
912 918 Py_DECREF(value);
913 919
914 920 if (generaldelta) {
915 921 iterrev = baserev;
916 922 }
917 923 else {
918 924 iterrev--;
919 925 }
920 926
921 927 if (iterrev < 0) {
922 928 break;
923 929 }
924 930
925 931 if (iterrev >= length) {
926 932 PyErr_SetString(PyExc_IndexError, "revision outside index");
927 933 return NULL;
928 934 }
929 935
930 936 baserev = index_baserev(self, iterrev);
931 937
932 938 /* This should never happen. */
933 939 if (baserev <= -2) {
934 940 /* Error should be set by index_deref() */
935 941 assert(PyErr_Occurred());
936 942 goto bail;
937 943 }
938 944 }
939 945
940 946 if (iterrev == stoprev) {
941 947 stopped = 1;
942 948 }
943 949 else {
944 950 PyObject *value = PyInt_FromLong(iterrev);
945 951 if (value == NULL) {
946 952 goto bail;
947 953 }
948 954 if (PyList_Append(chain, value)) {
949 955 Py_DECREF(value);
950 956 goto bail;
951 957 }
952 958 Py_DECREF(value);
953 959
954 960 stopped = 0;
955 961 }
956 962
957 963 if (PyList_Reverse(chain)) {
958 964 goto bail;
959 965 }
960 966
961 967 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
962 968 Py_DECREF(chain);
963 969 return result;
964 970
965 971 bail:
966 972 Py_DECREF(chain);
967 973 return NULL;
968 974 }
969 975
970 976 static inline int nt_level(const char *node, Py_ssize_t level)
971 977 {
972 978 int v = node[level>>1];
973 979 if (!(level & 1))
974 980 v >>= 4;
975 981 return v & 0xf;
976 982 }
977 983
/*
 * Look up a node (binary, or hex when `hex` is set), possibly given as
 * a prefix, in the trie.
 *
 * Return values:
 *
 *   -4: match is ambiguous (multiple candidates)
 *   -2: not found
 * rest: valid rev
 */
static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
		   int hex)
{
	int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
	int level, maxlevel, off;

	/* The all-zero nullid maps directly to nullrev. */
	if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
		return -1;

	/* maxlevel = number of nybbles to compare (40 == full hash). */
	if (hex)
		maxlevel = nodelen > 40 ? 40 : (int)nodelen;
	else
		maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);

	for (level = off = 0; level < maxlevel; level++) {
		int k = getnybble(node, level);
		nodetreenode *n = &self->nodes[off];
		int v = n->children[k];

		if (v < 0) {
			/* Leaf: decode -(rev + 2) and verify the rest of
			 * the query against the actual stored node. */
			const char *n;
			Py_ssize_t i;

			v = -(v + 2);
			n = index_node(self->index, v);
			if (n == NULL)
				return -2;
			for (i = level; i < maxlevel; i++)
				if (getnybble(node, i) != nt_level(n, i))
					return -2;
			return v;
		}
		if (v == 0)
			return -2;
		/* Interior node: descend. */
		off = v;
	}
	/* multiple matches against an ambiguous prefix */
	return -4;
}
1024 1030
/*
 * Allocate a fresh (zeroed) trie node, doubling the node array when
 * full. Returns the new node's offset, or -1 with a MemoryError set.
 */
static int nt_new(nodetree *self)
{
	if (self->length == self->capacity) {
		unsigned newcapacity;
		nodetreenode *newnodes;
		newcapacity = self->capacity * 2;
		/* Guard the capacity * sizeof multiplication below. */
		if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
			PyErr_SetString(PyExc_MemoryError, "overflow in nt_new");
			return -1;
		}
		newnodes = realloc(self->nodes, newcapacity * sizeof(nodetreenode));
		if (newnodes == NULL) {
			PyErr_SetString(PyExc_MemoryError, "out of memory");
			return -1;
		}
		self->capacity = newcapacity;
		self->nodes = newnodes;
		/* Zero the new tail so children[] slots read as empty. */
		memset(&self->nodes[self->length], 0,
		       sizeof(nodetreenode) * (self->capacity - self->length));
	}
	return self->length++;
}
1047 1053
/*
 * Insert a 20-byte node with revision `rev` into the trie, splitting
 * an occupied leaf into an interior node when two nodes share a
 * prefix. Returns 0 on success, -1 on error.
 */
static int nt_insert(nodetree *self, const char *node, int rev)
{
	int level = 0;
	int off = 0;

	while (level < 40) {
		int k = nt_level(node, level);
		nodetreenode *n;
		int v;

		n = &self->nodes[off];
		v = n->children[k];

		if (v == 0) {
			/* Empty slot: store the rev as a leaf, -(rev + 2). */
			n->children[k] = -rev - 2;
			return 0;
		}
		if (v < 0) {
			/* Occupied leaf: same node means update in place;
			 * otherwise push the old leaf one level down. */
			const char *oldnode = index_node_existing(self->index, -(v + 2));
			int noff;

			if (oldnode == NULL)
				return -1;
			if (!memcmp(oldnode, node, 20)) {
				n->children[k] = -rev - 2;
				return 0;
			}
			noff = nt_new(self);
			if (noff == -1)
				return -1;
			/* self->nodes may have been changed by realloc */
			self->nodes[off].children[k] = noff;
			off = noff;
			n = &self->nodes[off];
			n->children[nt_level(oldnode, ++level)] = v;
			if (level > self->depth)
				self->depth = level;
			self->splits += 1;
		} else {
			/* Interior node: descend one level. */
			level += 1;
			off = v;
		}
	}

	/* 40 levels exhausted without placement: full-hash collision. */
	return -1;
}
1094 1100
1095 1101 static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
1096 1102 {
1097 1103 Py_ssize_t rev;
1098 1104 const char *node;
1099 1105 Py_ssize_t length;
1100 1106 if (!PyArg_ParseTuple(args, "n", &rev))
1101 1107 return NULL;
1102 1108 length = index_length(self->nt.index);
1103 1109 if (rev < 0 || rev >= length) {
1104 1110 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
1105 1111 return NULL;
1106 1112 }
1107 1113 node = index_node_existing(self->nt.index, rev);
1108 1114 if (nt_insert(&self->nt, node, (int)rev) == -1)
1109 1115 return NULL;
1110 1116 Py_RETURN_NONE;
1111 1117 }
1112 1118
/*
 * Remove a node from the trie. Returns 0 on success, -1 on error.
 */
static int nt_delete_node(nodetree *self, const char *node)
{
	/* rev==-2 happens to get encoded as 0, which is interpreted as not set */
	return nt_insert(self, node, -2);
}
1118 1124
/*
 * Initialize a nodetree over 'index'. The node table starts at half
 * the revision-count hint (minimum 4) since interior nodes are shared.
 * Does NOT take a reference to 'index'; the caller owns that.
 *
 * Returns 0, or -1 with a Python exception set on overflow or OOM.
 */
static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
{
	/* Initialize before overflow-checking to avoid nt_dealloc() crash. */
	self->nodes = NULL;

	self->index = index;
	/* The input capacity is in terms of revisions, while the field is in
	 * terms of nodetree nodes. */
	self->capacity = (capacity < 4 ? 4 : capacity / 2);
	self->depth = 0;
	self->splits = 0;
	if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
		PyErr_SetString(PyExc_ValueError, "overflow in init_nt");
		return -1;
	}
	self->nodes = calloc(self->capacity, sizeof(nodetreenode));
	if (self->nodes == NULL) {
		PyErr_NoMemory();
		return -1;
	}
	/* node 0 is the (already zeroed) root */
	self->length = 1;
	return 0;
}
1142 1148
1143 1149 static PyTypeObject indexType;
1144 1150
/*
 * tp_init for parsers.nodetree: nodetree(index, capacity).
 * Takes a new reference to the index; it is released in ntobj_dealloc()
 * (which runs on init failure too, since nt_init stores the pointer
 * before it can fail).
 */
static int ntobj_init(nodetreeObject *self, PyObject *args)
{
	PyObject *index;
	unsigned capacity;
	if (!PyArg_ParseTuple(args, "O!I", &indexType, &index, &capacity))
		return -1;
	Py_INCREF(index);
	return nt_init(&self->nt, (indexObject*)index, capacity);
}
1154 1160
/*
 * Resolve a hex prefix of length 'nodelen' to a rev.
 * Thin wrapper over nt_find() in hex mode; same return convention.
 */
static int nt_partialmatch(nodetree *self, const char *node,
			   Py_ssize_t nodelen)
{
	return nt_find(self, node, nodelen, 1);
}
1160 1166
1161 1167 /*
1162 1168 * Find the length of the shortest unique prefix of node.
1163 1169 *
1164 1170 * Return values:
1165 1171 *
1166 1172 * -3: error (exception set)
1167 1173 * -2: not found (no exception set)
1168 1174 * rest: length of shortest prefix
1169 1175 */
1170 1176 static int nt_shortest(nodetree *self, const char *node)
1171 1177 {
1172 1178 int level, off;
1173 1179
1174 1180 for (level = off = 0; level < 40; level++) {
1175 1181 int k, v;
1176 1182 nodetreenode *n = &self->nodes[off];
1177 1183 k = nt_level(node, level);
1178 1184 v = n->children[k];
1179 1185 if (v < 0) {
1180 1186 const char *n;
1181 1187 v = -(v + 2);
1182 1188 n = index_node_existing(self->index, v);
1183 1189 if (n == NULL)
1184 1190 return -3;
1185 1191 if (memcmp(node, n, 20) != 0)
1186 1192 /*
1187 1193 * Found a unique prefix, but it wasn't for the
1188 1194 * requested node (i.e the requested node does
1189 1195 * not exist).
1190 1196 */
1191 1197 return -2;
1192 1198 return level + 1;
1193 1199 }
1194 1200 if (v == 0)
1195 1201 return -2;
1196 1202 off = v;
1197 1203 }
1198 1204 /*
1199 1205 * The node was still not unique after 40 hex digits, so this won't
1200 1206 * happen. Also, if we get here, then there's a programming error in
1201 1207 * this file that made us insert a node longer than 40 hex digits.
1202 1208 */
1203 1209 PyErr_SetString(PyExc_Exception, "broken node tree");
1204 1210 return -3;
1205 1211 }
1206 1212
1207 1213 static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
1208 1214 {
1209 1215 PyObject *val;
1210 1216 char *node;
1211 1217 int length;
1212 1218
1213 1219 if (!PyArg_ParseTuple(args, "O", &val))
1214 1220 return NULL;
1215 1221 if (node_check(val, &node) == -1)
1216 1222 return NULL;
1217 1223
1218 1224 length = nt_shortest(&self->nt, node);
1219 1225 if (length == -3)
1220 1226 return NULL;
1221 1227 if (length == -2) {
1222 1228 raise_revlog_error();
1223 1229 return NULL;
1224 1230 }
1225 1231 return PyInt_FromLong(length);
1226 1232 }
1227 1233
/* Free the trie's node storage; safe to call more than once. */
static void nt_dealloc(nodetree *self)
{
	free(self->nodes);
	self->nodes = NULL;
}
1233 1239
/* tp_dealloc: drop the index reference, free trie memory, free object. */
static void ntobj_dealloc(nodetreeObject *self)
{
	Py_XDECREF(self->nt.index);
	nt_dealloc(&self->nt);
	PyObject_Del(self);
}
1240 1246
/* Methods exposed on parsers.nodetree instances. */
static PyMethodDef ntobj_methods[] = {
	{"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
	 "insert an index entry"},
	{"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
	 "find length of shortest hex nodeid of a binary ID"},
	{NULL} /* Sentinel */
};
1248 1254
/* Type object for parsers.nodetree (standalone node->rev radix tree). */
static PyTypeObject nodetreeType = {
	PyVarObject_HEAD_INIT(NULL, 0) /* header */
	"parsers.nodetree",        /* tp_name */
	sizeof(nodetreeObject) ,   /* tp_basicsize */
	0,                         /* tp_itemsize */
	(destructor)ntobj_dealloc, /* tp_dealloc */
	0,                         /* tp_print */
	0,                         /* tp_getattr */
	0,                         /* tp_setattr */
	0,                         /* tp_compare */
	0,                         /* tp_repr */
	0,                         /* tp_as_number */
	0,                         /* tp_as_sequence */
	0,                         /* tp_as_mapping */
	0,                         /* tp_hash */
	0,                         /* tp_call */
	0,                         /* tp_str */
	0,                         /* tp_getattro */
	0,                         /* tp_setattro */
	0,                         /* tp_as_buffer */
	Py_TPFLAGS_DEFAULT,        /* tp_flags */
	"nodetree",                /* tp_doc */
	0,                         /* tp_traverse */
	0,                         /* tp_clear */
	0,                         /* tp_richcompare */
	0,                         /* tp_weaklistoffset */
	0,                         /* tp_iter */
	0,                         /* tp_iternext */
	ntobj_methods,             /* tp_methods */
	0,                         /* tp_members */
	0,                         /* tp_getset */
	0,                         /* tp_base */
	0,                         /* tp_dict */
	0,                         /* tp_descr_get */
	0,                         /* tp_descr_set */
	0,                         /* tp_dictoffset */
	(initproc)ntobj_init,      /* tp_init */
	0,                         /* tp_alloc */
};
1288 1294
/*
 * Lazily create the index's embedded nodetree and seed it with the
 * nullid sentinel (rev -1). Idempotent. Returns 0, or -1 with an
 * exception set.
 */
static int index_init_nt(indexObject *self)
{
	if (!self->ntinitialized) {
		if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
			nt_dealloc(&self->nt);
			return -1;
		}
		if (nt_insert(&self->nt, nullid, -1) == -1) {
			nt_dealloc(&self->nt);
			return -1;
		}
		self->ntinitialized = 1;
		/* revs >= ntrev are not yet cached in the trie */
		self->ntrev = (int)index_length(self);
		self->ntlookups = 1;
		self->ntmisses = 0;
	}
	return 0;
}
1307 1313
1308 1314 /*
1309 1315 * Return values:
1310 1316 *
1311 1317 * -3: error (exception set)
1312 1318 * -2: not found (no exception set)
1313 1319 * rest: valid rev
1314 1320 */
1315 1321 static int index_find_node(indexObject *self,
1316 1322 const char *node, Py_ssize_t nodelen)
1317 1323 {
1318 1324 int rev;
1319 1325
1320 1326 if (index_init_nt(self) == -1)
1321 1327 return -3;
1322 1328
1323 1329 self->ntlookups++;
1324 1330 rev = nt_find(&self->nt, node, nodelen, 0);
1325 1331 if (rev >= -1)
1326 1332 return rev;
1327 1333
1328 1334 /*
1329 1335 * For the first handful of lookups, we scan the entire index,
1330 1336 * and cache only the matching nodes. This optimizes for cases
1331 1337 * like "hg tip", where only a few nodes are accessed.
1332 1338 *
1333 1339 * After that, we cache every node we visit, using a single
1334 1340 * scan amortized over multiple lookups. This gives the best
1335 1341 * bulk performance, e.g. for "hg log".
1336 1342 */
1337 1343 if (self->ntmisses++ < 4) {
1338 1344 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1339 1345 const char *n = index_node_existing(self, rev);
1340 1346 if (n == NULL)
1341 1347 return -3;
1342 1348 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1343 1349 if (nt_insert(&self->nt, n, rev) == -1)
1344 1350 return -3;
1345 1351 break;
1346 1352 }
1347 1353 }
1348 1354 } else {
1349 1355 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1350 1356 const char *n = index_node_existing(self, rev);
1351 1357 if (n == NULL)
1352 1358 return -3;
1353 1359 if (nt_insert(&self->nt, n, rev) == -1) {
1354 1360 self->ntrev = rev + 1;
1355 1361 return -3;
1356 1362 }
1357 1363 if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
1358 1364 break;
1359 1365 }
1360 1366 }
1361 1367 self->ntrev = rev;
1362 1368 }
1363 1369
1364 1370 if (rev >= 0)
1365 1371 return rev;
1366 1372 return -2;
1367 1373 }
1368 1374
1369 1375 static PyObject *index_getitem(indexObject *self, PyObject *value)
1370 1376 {
1371 1377 char *node;
1372 1378 int rev;
1373 1379
1374 1380 if (PyInt_Check(value))
1375 1381 return index_get(self, PyInt_AS_LONG(value));
1376 1382
1377 1383 if (node_check(value, &node) == -1)
1378 1384 return NULL;
1379 1385 rev = index_find_node(self, node, 20);
1380 1386 if (rev >= -1)
1381 1387 return PyInt_FromLong(rev);
1382 1388 if (rev == -2)
1383 1389 raise_revlog_error();
1384 1390 return NULL;
1385 1391 }
1386 1392
1387 1393 /*
1388 1394 * Fully populate the radix tree.
1389 1395 */
1390 1396 static int index_populate_nt(indexObject *self) {
1391 1397 int rev;
1392 1398 if (self->ntrev > 0) {
1393 1399 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1394 1400 const char *n = index_node_existing(self, rev);
1395 1401 if (n == NULL)
1396 1402 return -1;
1397 1403 if (nt_insert(&self->nt, n, rev) == -1)
1398 1404 return -1;
1399 1405 }
1400 1406 self->ntrev = -1;
1401 1407 }
1402 1408 return 0;
1403 1409 }
1404 1410
/*
 * index.partialmatch(hexprefix): resolve a 1-40 character hex prefix
 * to the full 20-byte binary node it uniquely identifies.
 *
 * Returns the binary node, None if there is no match or the input is
 * not hex, or raises RevlogError on an ambiguous prefix.
 */
static PyObject *index_partialmatch(indexObject *self, PyObject *args)
{
	const char *fullnode;
	int nodelen;
	char *node;
	int rev, i;

	if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
		return NULL;

	if (nodelen < 1) {
		PyErr_SetString(PyExc_ValueError, "key too short");
		return NULL;
	}

	if (nodelen > 40) {
		PyErr_SetString(PyExc_ValueError, "key too long");
		return NULL;
	}

	for (i = 0; i < nodelen; i++)
		hexdigit(node, i);
	if (PyErr_Occurred()) {
		/* input contains non-hex characters */
		PyErr_Clear();
		Py_RETURN_NONE;
	}

	/* prefix search requires the complete tree */
	if (index_init_nt(self) == -1)
		return NULL;
	if (index_populate_nt(self) == -1)
		return NULL;
	rev = nt_partialmatch(&self->nt, node, nodelen);

	switch (rev) {
	case -4:
		/* ambiguous prefix */
		raise_revlog_error();
		return NULL;
	case -2:
		Py_RETURN_NONE;
	case -1:
		return PyBytes_FromStringAndSize(nullid, 20);
	}

	fullnode = index_node_existing(self, rev);
	if (fullnode == NULL) {
		return NULL;
	}
	return PyBytes_FromStringAndSize(fullnode, 20);
}
1455 1461
/*
 * index.shortest(node): length of the shortest unique hex prefix of
 * the given binary node id. Requires the fully populated tree.
 */
static PyObject *index_shortest(indexObject *self, PyObject *args)
{
	PyObject *val;
	char *node;
	int length;

	if (!PyArg_ParseTuple(args, "O", &val))
		return NULL;
	if (node_check(val, &node) == -1)
		return NULL;

	self->ntlookups++;
	if (index_init_nt(self) == -1)
		return NULL;
	if (index_populate_nt(self) == -1)
		return NULL;
	length = nt_shortest(&self->nt, node);
	if (length == -3)
		return NULL;
	if (length == -2) {
		/* node is not in the index */
		raise_revlog_error();
		return NULL;
	}
	return PyInt_FromLong(length);
}
1481 1487
1482 1488 static PyObject *index_m_get(indexObject *self, PyObject *args)
1483 1489 {
1484 1490 PyObject *val;
1485 1491 char *node;
1486 1492 int rev;
1487 1493
1488 1494 if (!PyArg_ParseTuple(args, "O", &val))
1489 1495 return NULL;
1490 1496 if (node_check(val, &node) == -1)
1491 1497 return NULL;
1492 1498 rev = index_find_node(self, node, 20);
1493 1499 if (rev == -3)
1494 1500 return NULL;
1495 1501 if (rev == -2)
1496 1502 Py_RETURN_NONE;
1497 1503 return PyInt_FromLong(rev);
1498 1504 }
1499 1505
1500 1506 static int index_contains(indexObject *self, PyObject *value)
1501 1507 {
1502 1508 char *node;
1503 1509
1504 1510 if (PyInt_Check(value)) {
1505 1511 long rev = PyInt_AS_LONG(value);
1506 1512 return rev >= -1 && rev < index_length(self);
1507 1513 }
1508 1514
1509 1515 if (node_check(value, &node) == -1)
1510 1516 return -1;
1511 1517
1512 1518 switch (index_find_node(self, node, 20)) {
1513 1519 case -3:
1514 1520 return -1;
1515 1521 case -2:
1516 1522 return 0;
1517 1523 default:
1518 1524 return 1;
1519 1525 }
1520 1526 }
1521 1527
1522 1528 typedef uint64_t bitmask;
1523 1529
1524 1530 /*
1525 1531 * Given a disjoint set of revs, return all candidates for the
1526 1532 * greatest common ancestor. In revset notation, this is the set
1527 1533 * "heads(::a and ::b and ...)"
1528 1534 */
1529 1535 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
1530 1536 int revcount)
1531 1537 {
1532 1538 const bitmask allseen = (1ull << revcount) - 1;
1533 1539 const bitmask poison = 1ull << revcount;
1534 1540 PyObject *gca = PyList_New(0);
1535 1541 int i, v, interesting;
1536 1542 int maxrev = -1;
1537 1543 bitmask sp;
1538 1544 bitmask *seen;
1539 1545
1540 1546 if (gca == NULL)
1541 1547 return PyErr_NoMemory();
1542 1548
1543 1549 for (i = 0; i < revcount; i++) {
1544 1550 if (revs[i] > maxrev)
1545 1551 maxrev = revs[i];
1546 1552 }
1547 1553
1548 1554 seen = calloc(sizeof(*seen), maxrev + 1);
1549 1555 if (seen == NULL) {
1550 1556 Py_DECREF(gca);
1551 1557 return PyErr_NoMemory();
1552 1558 }
1553 1559
1554 1560 for (i = 0; i < revcount; i++)
1555 1561 seen[revs[i]] = 1ull << i;
1556 1562
1557 1563 interesting = revcount;
1558 1564
1559 1565 for (v = maxrev; v >= 0 && interesting; v--) {
1560 1566 bitmask sv = seen[v];
1561 1567 int parents[2];
1562 1568
1563 1569 if (!sv)
1564 1570 continue;
1565 1571
1566 1572 if (sv < poison) {
1567 1573 interesting -= 1;
1568 1574 if (sv == allseen) {
1569 1575 PyObject *obj = PyInt_FromLong(v);
1570 1576 if (obj == NULL)
1571 1577 goto bail;
1572 1578 if (PyList_Append(gca, obj) == -1) {
1573 1579 Py_DECREF(obj);
1574 1580 goto bail;
1575 1581 }
1576 1582 sv |= poison;
1577 1583 for (i = 0; i < revcount; i++) {
1578 1584 if (revs[i] == v)
1579 1585 goto done;
1580 1586 }
1581 1587 }
1582 1588 }
1583 1589 if (index_get_parents(self, v, parents, maxrev) < 0)
1584 1590 goto bail;
1585 1591
1586 1592 for (i = 0; i < 2; i++) {
1587 1593 int p = parents[i];
1588 1594 if (p == -1)
1589 1595 continue;
1590 1596 sp = seen[p];
1591 1597 if (sv < poison) {
1592 1598 if (sp == 0) {
1593 1599 seen[p] = sv;
1594 1600 interesting++;
1595 1601 }
1596 1602 else if (sp != sv)
1597 1603 seen[p] |= sv;
1598 1604 } else {
1599 1605 if (sp && sp < poison)
1600 1606 interesting--;
1601 1607 seen[p] = sv;
1602 1608 }
1603 1609 }
1604 1610 }
1605 1611
1606 1612 done:
1607 1613 free(seen);
1608 1614 return gca;
1609 1615 bail:
1610 1616 free(seen);
1611 1617 Py_XDECREF(gca);
1612 1618 return NULL;
1613 1619 }
1614 1620
1615 1621 /*
1616 1622 * Given a disjoint set of revs, return the subset with the longest
1617 1623 * path to the root.
1618 1624 */
1619 1625 static PyObject *find_deepest(indexObject *self, PyObject *revs)
1620 1626 {
1621 1627 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
1622 1628 static const Py_ssize_t capacity = 24;
1623 1629 int *depth, *interesting = NULL;
1624 1630 int i, j, v, ninteresting;
1625 1631 PyObject *dict = NULL, *keys = NULL;
1626 1632 long *seen = NULL;
1627 1633 int maxrev = -1;
1628 1634 long final;
1629 1635
1630 1636 if (revcount > capacity) {
1631 1637 PyErr_Format(PyExc_OverflowError,
1632 1638 "bitset size (%ld) > capacity (%ld)",
1633 1639 (long)revcount, (long)capacity);
1634 1640 return NULL;
1635 1641 }
1636 1642
1637 1643 for (i = 0; i < revcount; i++) {
1638 1644 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
1639 1645 if (n > maxrev)
1640 1646 maxrev = n;
1641 1647 }
1642 1648
1643 1649 depth = calloc(sizeof(*depth), maxrev + 1);
1644 1650 if (depth == NULL)
1645 1651 return PyErr_NoMemory();
1646 1652
1647 1653 seen = calloc(sizeof(*seen), maxrev + 1);
1648 1654 if (seen == NULL) {
1649 1655 PyErr_NoMemory();
1650 1656 goto bail;
1651 1657 }
1652 1658
1653 1659 interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
1654 1660 if (interesting == NULL) {
1655 1661 PyErr_NoMemory();
1656 1662 goto bail;
1657 1663 }
1658 1664
1659 1665 if (PyList_Sort(revs) == -1)
1660 1666 goto bail;
1661 1667
1662 1668 for (i = 0; i < revcount; i++) {
1663 1669 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
1664 1670 long b = 1l << i;
1665 1671 depth[n] = 1;
1666 1672 seen[n] = b;
1667 1673 interesting[b] = 1;
1668 1674 }
1669 1675
1670 1676 /* invariant: ninteresting is the number of non-zero entries in
1671 1677 * interesting. */
1672 1678 ninteresting = (int)revcount;
1673 1679
1674 1680 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
1675 1681 int dv = depth[v];
1676 1682 int parents[2];
1677 1683 long sv;
1678 1684
1679 1685 if (dv == 0)
1680 1686 continue;
1681 1687
1682 1688 sv = seen[v];
1683 1689 if (index_get_parents(self, v, parents, maxrev) < 0)
1684 1690 goto bail;
1685 1691
1686 1692 for (i = 0; i < 2; i++) {
1687 1693 int p = parents[i];
1688 1694 long sp;
1689 1695 int dp;
1690 1696
1691 1697 if (p == -1)
1692 1698 continue;
1693 1699
1694 1700 dp = depth[p];
1695 1701 sp = seen[p];
1696 1702 if (dp <= dv) {
1697 1703 depth[p] = dv + 1;
1698 1704 if (sp != sv) {
1699 1705 interesting[sv] += 1;
1700 1706 seen[p] = sv;
1701 1707 if (sp) {
1702 1708 interesting[sp] -= 1;
1703 1709 if (interesting[sp] == 0)
1704 1710 ninteresting -= 1;
1705 1711 }
1706 1712 }
1707 1713 }
1708 1714 else if (dv == dp - 1) {
1709 1715 long nsp = sp | sv;
1710 1716 if (nsp == sp)
1711 1717 continue;
1712 1718 seen[p] = nsp;
1713 1719 interesting[sp] -= 1;
1714 1720 if (interesting[sp] == 0)
1715 1721 ninteresting -= 1;
1716 1722 if (interesting[nsp] == 0)
1717 1723 ninteresting += 1;
1718 1724 interesting[nsp] += 1;
1719 1725 }
1720 1726 }
1721 1727 interesting[sv] -= 1;
1722 1728 if (interesting[sv] == 0)
1723 1729 ninteresting -= 1;
1724 1730 }
1725 1731
1726 1732 final = 0;
1727 1733 j = ninteresting;
1728 1734 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
1729 1735 if (interesting[i] == 0)
1730 1736 continue;
1731 1737 final |= i;
1732 1738 j -= 1;
1733 1739 }
1734 1740 if (final == 0) {
1735 1741 keys = PyList_New(0);
1736 1742 goto bail;
1737 1743 }
1738 1744
1739 1745 dict = PyDict_New();
1740 1746 if (dict == NULL)
1741 1747 goto bail;
1742 1748
1743 1749 for (i = 0; i < revcount; i++) {
1744 1750 PyObject *key;
1745 1751
1746 1752 if ((final & (1 << i)) == 0)
1747 1753 continue;
1748 1754
1749 1755 key = PyList_GET_ITEM(revs, i);
1750 1756 Py_INCREF(key);
1751 1757 Py_INCREF(Py_None);
1752 1758 if (PyDict_SetItem(dict, key, Py_None) == -1) {
1753 1759 Py_DECREF(key);
1754 1760 Py_DECREF(Py_None);
1755 1761 goto bail;
1756 1762 }
1757 1763 }
1758 1764
1759 1765 keys = PyDict_Keys(dict);
1760 1766
1761 1767 bail:
1762 1768 free(depth);
1763 1769 free(seen);
1764 1770 free(interesting);
1765 1771 Py_XDECREF(dict);
1766 1772
1767 1773 return keys;
1768 1774 }
1769 1775
1770 1776 /*
1771 1777 * Given a (possibly overlapping) set of revs, return all the
1772 1778 * common ancestors heads: heads(::args[0] and ::a[1] and ...)
1773 1779 */
1774 1780 static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
1775 1781 {
1776 1782 PyObject *ret = NULL;
1777 1783 Py_ssize_t argcount, i, len;
1778 1784 bitmask repeat = 0;
1779 1785 int revcount = 0;
1780 1786 int *revs;
1781 1787
1782 1788 argcount = PySequence_Length(args);
1783 1789 revs = PyMem_Malloc(argcount * sizeof(*revs));
1784 1790 if (argcount > 0 && revs == NULL)
1785 1791 return PyErr_NoMemory();
1786 1792 len = index_length(self);
1787 1793
1788 1794 for (i = 0; i < argcount; i++) {
1789 1795 static const int capacity = 24;
1790 1796 PyObject *obj = PySequence_GetItem(args, i);
1791 1797 bitmask x;
1792 1798 long val;
1793 1799
1794 1800 if (!PyInt_Check(obj)) {
1795 1801 PyErr_SetString(PyExc_TypeError,
1796 1802 "arguments must all be ints");
1797 1803 Py_DECREF(obj);
1798 1804 goto bail;
1799 1805 }
1800 1806 val = PyInt_AsLong(obj);
1801 1807 Py_DECREF(obj);
1802 1808 if (val == -1) {
1803 1809 ret = PyList_New(0);
1804 1810 goto done;
1805 1811 }
1806 1812 if (val < 0 || val >= len) {
1807 1813 PyErr_SetString(PyExc_IndexError,
1808 1814 "index out of range");
1809 1815 goto bail;
1810 1816 }
1811 1817 /* this cheesy bloom filter lets us avoid some more
1812 1818 * expensive duplicate checks in the common set-is-disjoint
1813 1819 * case */
1814 1820 x = 1ull << (val & 0x3f);
1815 1821 if (repeat & x) {
1816 1822 int k;
1817 1823 for (k = 0; k < revcount; k++) {
1818 1824 if (val == revs[k])
1819 1825 goto duplicate;
1820 1826 }
1821 1827 }
1822 1828 else repeat |= x;
1823 1829 if (revcount >= capacity) {
1824 1830 PyErr_Format(PyExc_OverflowError,
1825 1831 "bitset size (%d) > capacity (%d)",
1826 1832 revcount, capacity);
1827 1833 goto bail;
1828 1834 }
1829 1835 revs[revcount++] = (int)val;
1830 1836 duplicate:;
1831 1837 }
1832 1838
1833 1839 if (revcount == 0) {
1834 1840 ret = PyList_New(0);
1835 1841 goto done;
1836 1842 }
1837 1843 if (revcount == 1) {
1838 1844 PyObject *obj;
1839 1845 ret = PyList_New(1);
1840 1846 if (ret == NULL)
1841 1847 goto bail;
1842 1848 obj = PyInt_FromLong(revs[0]);
1843 1849 if (obj == NULL)
1844 1850 goto bail;
1845 1851 PyList_SET_ITEM(ret, 0, obj);
1846 1852 goto done;
1847 1853 }
1848 1854
1849 1855 ret = find_gca_candidates(self, revs, revcount);
1850 1856 if (ret == NULL)
1851 1857 goto bail;
1852 1858
1853 1859 done:
1854 1860 PyMem_Free(revs);
1855 1861 return ret;
1856 1862
1857 1863 bail:
1858 1864 PyMem_Free(revs);
1859 1865 Py_XDECREF(ret);
1860 1866 return NULL;
1861 1867 }
1862 1868
1863 1869 /*
1864 1870 * Given a (possibly overlapping) set of revs, return the greatest
1865 1871 * common ancestors: those with the longest path to the root.
1866 1872 */
1867 1873 static PyObject *index_ancestors(indexObject *self, PyObject *args)
1868 1874 {
1869 1875 PyObject *ret;
1870 1876 PyObject *gca = index_commonancestorsheads(self, args);
1871 1877 if (gca == NULL)
1872 1878 return NULL;
1873 1879
1874 1880 if (PyList_GET_SIZE(gca) <= 1) {
1875 1881 return gca;
1876 1882 }
1877 1883
1878 1884 ret = find_deepest(self, gca);
1879 1885 Py_DECREF(gca);
1880 1886 return ret;
1881 1887 }
1882 1888
1883 1889 /*
1884 1890 * Invalidate any trie entries introduced by added revs.
1885 1891 */
1886 1892 static void index_invalidate_added(indexObject *self, Py_ssize_t start)
1887 1893 {
1888 1894 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
1889 1895
1890 1896 for (i = start; i < len; i++) {
1891 1897 PyObject *tuple = PyList_GET_ITEM(self->added, i);
1892 1898 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
1893 1899
1894 1900 nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
1895 1901 }
1896 1902
1897 1903 if (start == 0)
1898 1904 Py_CLEAR(self->added);
1899 1905 }
1900 1906
1901 1907 /*
1902 1908 * Delete a numeric range of revs, which must be at the end of the
1903 1909 * range, but exclude the sentinel nullid entry.
1904 1910 */
1905 1911 static int index_slice_del(indexObject *self, PyObject *item)
1906 1912 {
1907 1913 Py_ssize_t start, stop, step, slicelength;
1908 1914 Py_ssize_t length = index_length(self) + 1;
1909 1915 int ret = 0;
1910 1916
1911 1917 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
1912 1918 #ifdef IS_PY3K
1913 1919 if (PySlice_GetIndicesEx(item, length,
1914 1920 &start, &stop, &step, &slicelength) < 0)
1915 1921 #else
1916 1922 if (PySlice_GetIndicesEx((PySliceObject*)item, length,
1917 1923 &start, &stop, &step, &slicelength) < 0)
1918 1924 #endif
1919 1925 return -1;
1920 1926
1921 1927 if (slicelength <= 0)
1922 1928 return 0;
1923 1929
1924 1930 if ((step < 0 && start < stop) || (step > 0 && start > stop))
1925 1931 stop = start;
1926 1932
1927 1933 if (step < 0) {
1928 1934 stop = start + 1;
1929 1935 start = stop + step*(slicelength - 1) - 1;
1930 1936 step = -step;
1931 1937 }
1932 1938
1933 1939 if (step != 1) {
1934 1940 PyErr_SetString(PyExc_ValueError,
1935 1941 "revlog index delete requires step size of 1");
1936 1942 return -1;
1937 1943 }
1938 1944
1939 1945 if (stop != length - 1) {
1940 1946 PyErr_SetString(PyExc_IndexError,
1941 1947 "revlog index deletion indices are invalid");
1942 1948 return -1;
1943 1949 }
1944 1950
1945 1951 if (start < self->length) {
1946 1952 if (self->ntinitialized) {
1947 1953 Py_ssize_t i;
1948 1954
1949 1955 for (i = start + 1; i < self->length; i++) {
1950 1956 const char *node = index_node_existing(self, i);
1951 1957 if (node == NULL)
1952 1958 return -1;
1953 1959
1954 1960 nt_delete_node(&self->nt, node);
1955 1961 }
1956 1962 if (self->added)
1957 1963 index_invalidate_added(self, 0);
1958 1964 if (self->ntrev > start)
1959 1965 self->ntrev = (int)start;
1960 1966 }
1961 1967 self->length = start;
1962 1968 if (start < self->raw_length) {
1963 1969 if (self->cache) {
1964 1970 Py_ssize_t i;
1965 1971 for (i = start; i < self->raw_length; i++)
1966 1972 Py_CLEAR(self->cache[i]);
1967 1973 }
1968 1974 self->raw_length = start;
1969 1975 }
1970 1976 goto done;
1971 1977 }
1972 1978
1973 1979 if (self->ntinitialized) {
1974 1980 index_invalidate_added(self, start - self->length);
1975 1981 if (self->ntrev > start)
1976 1982 self->ntrev = (int)start;
1977 1983 }
1978 1984 if (self->added)
1979 1985 ret = PyList_SetSlice(self->added, start - self->length,
1980 1986 PyList_GET_SIZE(self->added), NULL);
1981 1987 done:
1982 1988 Py_CLEAR(self->headrevs);
1983 1989 return ret;
1984 1990 }
1985 1991
1986 1992 /*
1987 1993 * Supported ops:
1988 1994 *
1989 1995 * slice deletion
1990 1996 * string assignment (extend node->rev mapping)
1991 1997 * string deletion (shrink node->rev mapping)
1992 1998 */
1993 1999 static int index_assign_subscript(indexObject *self, PyObject *item,
1994 2000 PyObject *value)
1995 2001 {
1996 2002 char *node;
1997 2003 long rev;
1998 2004
1999 2005 if (PySlice_Check(item) && value == NULL)
2000 2006 return index_slice_del(self, item);
2001 2007
2002 2008 if (node_check(item, &node) == -1)
2003 2009 return -1;
2004 2010
2005 2011 if (value == NULL)
2006 2012 return self->ntinitialized ? nt_delete_node(&self->nt, node) : 0;
2007 2013 rev = PyInt_AsLong(value);
2008 2014 if (rev > INT_MAX || rev < 0) {
2009 2015 if (!PyErr_Occurred())
2010 2016 PyErr_SetString(PyExc_ValueError, "rev out of range");
2011 2017 return -1;
2012 2018 }
2013 2019
2014 2020 if (index_init_nt(self) == -1)
2015 2021 return -1;
2016 2022 return nt_insert(&self->nt, node, (int)rev);
2017 2023 }
2018 2024
2019 2025 /*
2020 2026 * Find all RevlogNG entries in an index that has inline data. Update
2021 2027 * the optional "offsets" table with those entries.
2022 2028 */
2023 2029 static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
2024 2030 {
2025 2031 const char *data = (const char *)self->buf.buf;
2026 2032 Py_ssize_t pos = 0;
2027 2033 Py_ssize_t end = self->buf.len;
2028 2034 long incr = v1_hdrsize;
2029 2035 Py_ssize_t len = 0;
2030 2036
2031 2037 while (pos + v1_hdrsize <= end && pos >= 0) {
2032 2038 uint32_t comp_len;
2033 2039 /* 3rd element of header is length of compressed inline data */
2034 2040 comp_len = getbe32(data + pos + 8);
2035 2041 incr = v1_hdrsize + comp_len;
2036 2042 if (offsets)
2037 2043 offsets[len] = data + pos;
2038 2044 len++;
2039 2045 pos += incr;
2040 2046 }
2041 2047
2042 2048 if (pos != end) {
2043 2049 if (!PyErr_Occurred())
2044 2050 PyErr_SetString(PyExc_ValueError, "corrupt index file");
2045 2051 return -1;
2046 2052 }
2047 2053
2048 2054 return len;
2049 2055 }
2050 2056
/*
 * tp_init for the index type: index(data, inlined).
 * 'data' must support the buffer protocol; 'inlined' selects between
 * an interleaved (data + index) layout and a pure index layout.
 */
static int index_init(indexObject *self, PyObject *args)
{
	PyObject *data_obj, *inlined_obj;
	Py_ssize_t size;

	/* Initialize before argument-checking to avoid index_dealloc() crash. */
	self->raw_length = 0;
	self->added = NULL;
	self->cache = NULL;
	self->data = NULL;
	memset(&self->buf, 0, sizeof(self->buf));
	self->headrevs = NULL;
	self->filteredrevs = Py_None;
	Py_INCREF(Py_None);
	self->ntinitialized = 0;
	self->offsets = NULL;

	if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
		return -1;
	if (!PyObject_CheckBuffer(data_obj)) {
		PyErr_SetString(PyExc_TypeError,
				"data does not support buffer interface");
		return -1;
	}

	if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
		return -1;
	size = self->buf.len;

	self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
	self->data = data_obj;

	self->ntlookups = self->ntmisses = 0;
	self->ntrev = -1;
	Py_INCREF(self->data);

	if (self->inlined) {
		/* inline indexes must be scanned to count entries */
		Py_ssize_t len = inline_scan(self, NULL);
		if (len == -1)
			goto bail;
		self->raw_length = len;
		self->length = len;
	} else {
		/* fixed-size entries: a partial entry means corruption */
		if (size % v1_hdrsize) {
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
			goto bail;
		}
		self->raw_length = size / v1_hdrsize;
		self->length = self->raw_length;
	}

	return 0;
bail:
	/* acquired refs/buffers are released by index_dealloc() */
	return -1;
}
2106 2112
/* The index doubles as its own nodemap: return a new reference to self. */
static PyObject *index_nodemap(indexObject *self)
{
	Py_INCREF(self);
	return (PyObject *)self;
}
2112 2118
/* Drop every derived cache: entry tuples, offsets, trie, headrevs. */
static void _index_clearcaches(indexObject *self)
{
	if (self->cache) {
		Py_ssize_t i;

		for (i = 0; i < self->raw_length; i++)
			Py_CLEAR(self->cache[i]);
		free(self->cache);
		self->cache = NULL;
	}
	if (self->offsets) {
		PyMem_Free((void *)self->offsets);
		self->offsets = NULL;
	}
	if (self->ntinitialized) {
		nt_dealloc(&self->nt);
	}
	self->ntinitialized = 0;
	Py_CLEAR(self->headrevs);
}
2133 2139
/* index.clearcaches(): reset caches and trie bookkeeping counters. */
static PyObject *index_clearcaches(indexObject *self)
{
	_index_clearcaches(self);
	self->ntrev = -1;
	self->ntlookups = self->ntmisses = 0;
	Py_RETURN_NONE;
}
2141 2147
/* tp_dealloc: release caches, the borrowed data buffer, and all owned
 * references before freeing the object itself. */
2142 2148 static void index_dealloc(indexObject *self)
2143 2149 {
2144 2150 	_index_clearcaches(self);
2145 2151 	Py_XDECREF(self->filteredrevs);
2146 2152 	if (self->buf.buf) {
		/* buffer was acquired in index_init via PyObject_GetBuffer */
2147 2153 	PyBuffer_Release(&self->buf);
2148 2154 	memset(&self->buf, 0, sizeof(self->buf));
2149 2155 	}
2150 2156 	Py_XDECREF(self->data);
2151 2157 	Py_XDECREF(self->added);
2152 2158 	PyObject_Del(self);
2153 2159 }
2154 2160
/* Sequence protocol: len(index), index[rev], and `node in index`. */
2155 2161 static PySequenceMethods index_sequence_methods = {
2156 2162 	(lenfunc)index_length, /* sq_length */
2157 2163 	0, /* sq_concat */
2158 2164 	0, /* sq_repeat */
2159 2165 	(ssizeargfunc)index_get, /* sq_item */
2160 2166 	0, /* sq_slice */
2161 2167 	0, /* sq_ass_item */
2162 2168 	0, /* sq_ass_slice */
2163 2169 	(objobjproc)index_contains, /* sq_contains */
2164 2170 };
2165 2171
/* Mapping protocol: nodemap-style subscripting and assignment. */
2166 2172 static PyMappingMethods index_mapping_methods = {
2167 2173 	(lenfunc)index_length, /* mp_length */
2168 2174 	(binaryfunc)index_getitem, /* mp_subscript */
2169 2175 	(objobjargproc)index_assign_subscript, /* mp_ass_subscript */
2170 2176 };
2171 2177
/* Method table for parsers.index; docstrings below describe each entry. */
2172 2178 static PyMethodDef index_methods[] = {
2173 2179 	{"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
2174 2180 	"return the gca set of the given revs"},
2175 2181 	{"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
2176 2182 	METH_VARARGS,
2177 2183 	"return the heads of the common ancestors of the given revs"},
2178 2184 	{"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
2179 2185 	"clear the index caches"},
2180 2186 	{"get", (PyCFunction)index_m_get, METH_VARARGS,
2181 2187 	"get an index entry"},
2182 2188 	{"computephasesmapsets", (PyCFunction)compute_phases_map_sets,
2183 2189 	METH_VARARGS, "compute phases"},
2184 2190 	{"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
2185 2191 	"reachableroots"},
	/* both names map to the same C function; see the trailing comments */
2186 2192 	{"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
2187 2193 	"get head revisions"}, /* Can do filtering since 3.2 */
2188 2194 	{"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
2189 2195 	"get filtered head revisions"}, /* Can always do filtering */
2190 2196 	{"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
2191 2197 	"determine revisions with deltas to reconstruct fulltext"},
2192 2198 	{"append", (PyCFunction)index_append, METH_O,
2193 2199 	"append an index entry"},
2194 2200 	{"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
2195 2201 	"match a potentially ambiguous node ID"},
2196 2202 	{"shortest", (PyCFunction)index_shortest, METH_VARARGS,
2197 2203 	"find length of shortest hex nodeid of a binary ID"},
2198 2204 	{"stats", (PyCFunction)index_stats, METH_NOARGS,
2199 2205 	"stats for the index"},
2200 2206 	{NULL} /* Sentinel */
2201 2207 };
2202 2208
/* Attribute table: exposes the read-only "nodemap" property. */
2203 2209 static PyGetSetDef index_getset[] = {
2204 2210 	{"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
2205 2211 	{NULL} /* Sentinel */
2206 2212 };
2207 2213
/* Type object for parsers.index; tp_new is filled in at module init. */
2208 2214 static PyTypeObject indexType = {
2209 2215 	PyVarObject_HEAD_INIT(NULL, 0) /* header */
2210 2216 	"parsers.index", /* tp_name */
2211 2217 	sizeof(indexObject), /* tp_basicsize */
2212 2218 	0, /* tp_itemsize */
2213 2219 	(destructor)index_dealloc, /* tp_dealloc */
2214 2220 	0, /* tp_print */
2215 2221 	0, /* tp_getattr */
2216 2222 	0, /* tp_setattr */
2217 2223 	0, /* tp_compare */
2218 2224 	0, /* tp_repr */
2219 2225 	0, /* tp_as_number */
2220 2226 	&index_sequence_methods, /* tp_as_sequence */
2221 2227 	&index_mapping_methods, /* tp_as_mapping */
2222 2228 	0, /* tp_hash */
2223 2229 	0, /* tp_call */
2224 2230 	0, /* tp_str */
2225 2231 	0, /* tp_getattro */
2226 2232 	0, /* tp_setattro */
2227 2233 	0, /* tp_as_buffer */
2228 2234 	Py_TPFLAGS_DEFAULT, /* tp_flags */
2229 2235 	"revlog index", /* tp_doc */
2230 2236 	0, /* tp_traverse */
2231 2237 	0, /* tp_clear */
2232 2238 	0, /* tp_richcompare */
2233 2239 	0, /* tp_weaklistoffset */
2234 2240 	0, /* tp_iter */
2235 2241 	0, /* tp_iternext */
2236 2242 	index_methods, /* tp_methods */
2237 2243 	0, /* tp_members */
2238 2244 	index_getset, /* tp_getset */
2239 2245 	0, /* tp_base */
2240 2246 	0, /* tp_dict */
2241 2247 	0, /* tp_descr_get */
2242 2248 	0, /* tp_descr_set */
2243 2249 	0, /* tp_dictoffset */
2244 2250 	(initproc)index_init, /* tp_init */
2245 2251 	0, /* tp_alloc */
2246 2252 };
2247 2253
2248 2254 /*
2249 2255 * returns a tuple of the form (index, index, cache) with elements as
2250 2256 * follows:
2251 2257 *
2252 2258 * index: an index object that lazily parses RevlogNG records
2253 2259 * cache: if data is inlined, a tuple (0, index_file_content), else None
2254 2260 * index_file_content could be a string, or a buffer
2255 2261 *
2256 2262 * added complications are for backwards compatibility
2257 2263 */
/* Module-level parse_index2(data, inlined): build an index object and
 * return (index, cache).  See the preceding block comment for details. */
2258 2264 PyObject *parse_index2(PyObject *self, PyObject *args)
2259 2265 {
2260 2266 	PyObject *tuple = NULL, *cache = NULL;
2261 2267 	indexObject *idx;
2262 2268 	int ret;
2263 2269
2264 2270 	idx = PyObject_New(indexObject, &indexType);
2265 2271 	if (idx == NULL)
2266 2272 	goto bail;
2267 2273
	/* forwards the original args tuple to index_init (tp_init) */
2268 2274 	ret = index_init(idx, args);
2269 2275 	if (ret == -1)
2270 2276 	goto bail;
2271 2277
2272 2278 	if (idx->inlined) {
		/* cache = (0, raw index data) so callers can reuse the bytes */
2273 2279 	cache = Py_BuildValue("iO", 0, idx->data);
2274 2280 	if (cache == NULL)
2275 2281 	goto bail;
2276 2282 	} else {
2277 2283 	cache = Py_None;
2278 2284 	Py_INCREF(cache);
2279 2285 	}
2280 2286
	/* "NN" steals our references to idx and cache on success */
2281 2287 	tuple = Py_BuildValue("NN", idx, cache);
2282 2288 	if (!tuple)
2283 2289 	goto bail;
2284 2290 	return tuple;
2285 2291
2286 2292 bail:
2287 2293 	Py_XDECREF(idx);
2288 2294 	Py_XDECREF(cache);
2289 2295 	Py_XDECREF(tuple);
2290 2296 	return NULL;
2291 2297 }
2292 2298
2293 2299 #ifdef WITH_RUST
2294 2300
2295 2301 /* rustlazyancestors: iteration over ancestors implemented in Rust
2296 2302 *
2297 2303 * This class holds a reference to an index and to the Rust iterator.
2298 2304 */
/* Opaque-to-Python holder pairing an index with the Rust-side iterator. */
2299 2305 typedef struct rustlazyancestorsObjectStruct rustlazyancestorsObject;
2300 2306
2301 2307 struct rustlazyancestorsObjectStruct {
2302 2308 	PyObject_HEAD
2303 2309 	/* Type-specific fields go here. */
2304 2310 	indexObject *index; /* Ref kept to avoid GC'ing the index */
2305 2311 	void *iter; /* Rust iterator */
2306 2312 };
2307 2313
2308 2314 /* FFI exposed from Rust code */
/* Constructor: returns NULL on error (a Python exception may already be
 * set by the parents callback; see rustla_init's bail comment). */
2309 2315 rustlazyancestorsObject *rustlazyancestors_init(
2310 2316 	indexObject *index,
2311 2317 	/* to pass index_get_parents() */
2312 2318 	int (*)(indexObject *, Py_ssize_t, int*, int),
2313 2319 	/* intrevs vector */
2314 2320 	Py_ssize_t initrevslen, long *initrevs,
2315 2321 	long stoprev,
2316 2322 	int inclusive);
2317 2323 void rustlazyancestors_drop(rustlazyancestorsObject *self);
2318 2324 int rustlazyancestors_next(rustlazyancestorsObject *self);
2319 2325 int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev);
2320 2326
2321 2327 /* CPython instance methods */
/* tp_init: parse (index, initrevs list, stoprev, inclusive bool), copy
 * the initial revisions into a C array of longs, and hand everything to
 * the Rust iterator constructor.  Returns 0 on success, -1 on error. */
2322 2328 static int rustla_init(rustlazyancestorsObject *self,
2323 2329 	PyObject *args) {
2324 2330 	PyObject *initrevsarg = NULL;
2325 2331 	PyObject *inclusivearg = NULL;
2326 2332 	long stoprev = 0;
2327 2333 	long *initrevs = NULL;
2328 2334 	int inclusive = 0;
2329 2335 	Py_ssize_t i;
2330 2336
2331 2337 	indexObject *index;
2332 2338 	if (!PyArg_ParseTuple(args, "O!O!lO!",
2333 2339 	&indexType, &index,
2334 2340 	&PyList_Type, &initrevsarg,
2335 2341 	&stoprev,
2336 2342 	&PyBool_Type, &inclusivearg))
2337 2343 	return -1;
2338 2344
	/* keep the index alive for the iterator's lifetime; released in
	 * rustla_dealloc even if we bail below */
2339 2345 	Py_INCREF(index);
2340 2346 	self->index = index;
2341 2347
2342 2348 	if (inclusivearg == Py_True)
2343 2349 	inclusive = 1;
2344 2350
	/* NOTE(review): mid-block declaration requires C99 or later — confirm
	 * the build's language mode allows it */
2345 2351 	Py_ssize_t linit = PyList_GET_SIZE(initrevsarg);
2346 2352
	/* NOTE(review): calloc(0, ...) may legitimately return NULL, which
	 * would raise a spurious MemoryError for an empty initrevs list —
	 * confirm callers never pass [] */
2347 2353 	initrevs = (long*)calloc(linit, sizeof(long));
2348 2354
2349 2355 	if (initrevs == NULL) {
2350 2356 	PyErr_NoMemory();
2351 2357 	goto bail;
2352 2358 	}
2353 2359
	/* PyInt_AsLong may fail on non-int items; checked once after the
	 * loop via PyErr_Occurred() */
2354 2360 	for (i=0; i<linit; i++) {
2355 2361 	initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i));
2356 2362 	}
2357 2363 	if (PyErr_Occurred())
2358 2364 	goto bail;
2359 2365
2360 2366 	self->iter = rustlazyancestors_init(index,
2361 2367 	index_get_parents,
2362 2368 	linit, initrevs,
2363 2369 	stoprev, inclusive);
2364 2370 	if (self->iter == NULL) {
2365 2371 	/* if this is because of GraphError::ParentOutOfRange
2366 2372 	* index_get_parents() has already set the proper ValueError */
2367 2373 	goto bail;
2368 2374 	}
2369 2375
	/* the Rust side copied initrevs; we own and free the C array */
2370 2376 	free(initrevs);
2371 2377 	return 0;
2372 2378
2373 2379 bail:
2374 2380 	free(initrevs);
2375 2381 	return -1;
/* NOTE(review): stray ';' after the function body — harmless to most
 * compilers but not strictly conforming C; consider dropping it */
2376 2382 };
2377 2383
/* tp_dealloc: release the kept index reference and drop the Rust
 * iterator (which may be NULL if rustla_init failed part-way). */
2378 2384 static void rustla_dealloc(rustlazyancestorsObject *self)
2379 2385 {
2380 2386 	Py_XDECREF(self->index);
2381 2387 	if (self->iter != NULL) { /* can happen if rustla_init failed */
2382 2388 	rustlazyancestors_drop(self->iter);
2383 2389 	}
2384 2390 	PyObject_Del(self);
2385 2391 }
2386 2392
/* tp_iternext: advance the Rust iterator.  A -1 result means exhaustion;
 * returning NULL without an exception signals StopIteration. */
2387 2393 static PyObject *rustla_next(rustlazyancestorsObject *self) {
2388 2394 	int res = rustlazyancestors_next(self->iter);
2389 2395 	if (res == -1) {
2390 2396 	/* Setting an explicit exception seems unnecessary
2391 2397 	* as examples from Python source code (Objects/rangeobjets.c and
2392 2398 	* Modules/_io/stringio.c) seem to demonstrate.
2393 2399 	*/
2394 2400 	return NULL;
2395 2401 	}
2396 2402 	return PyInt_FromLong(res);
2397 2403 }
2398 2404
/* sq_contains: non-integer membership tests are simply False (0) rather
 * than an error; integers are forwarded to the Rust iterator. */
2399 2405 static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev) {
2400 2406 	if (!(PyInt_Check(rev))) {
2401 2407 	return 0;
2402 2408 	}
2403 2409 	return rustlazyancestors_contains(self->iter, PyInt_AS_LONG(rev));
2404 2410 }
2405 2411
/* Sequence protocol: only `rev in iterator` is supported. */
2406 2412 static PySequenceMethods rustla_sequence_methods = {
2407 2413 	0, /* sq_length */
2408 2414 	0, /* sq_concat */
2409 2415 	0, /* sq_repeat */
2410 2416 	0, /* sq_item */
2411 2417 	0, /* sq_slice */
2412 2418 	0, /* sq_ass_item */
2413 2419 	0, /* sq_ass_slice */
2414 2420 	(objobjproc)rustla_contains, /* sq_contains */
2415 2421 };
2416 2422
/* Type object for the Rust-backed ancestors iterator; tp_new is filled
 * in at module init. */
2417 2423 static PyTypeObject rustlazyancestorsType = {
2418 2424 	PyVarObject_HEAD_INIT(NULL, 0) /* header */
2419 2425 	"parsers.rustlazyancestors", /* tp_name */
2420 2426 	sizeof(rustlazyancestorsObject), /* tp_basicsize */
2421 2427 	0, /* tp_itemsize */
2422 2428 	(destructor)rustla_dealloc, /* tp_dealloc */
2423 2429 	0, /* tp_print */
2424 2430 	0, /* tp_getattr */
2425 2431 	0, /* tp_setattr */
2426 2432 	0, /* tp_compare */
2427 2433 	0, /* tp_repr */
2428 2434 	0, /* tp_as_number */
2429 2435 	&rustla_sequence_methods, /* tp_as_sequence */
2430 2436 	0, /* tp_as_mapping */
2431 2437 	0, /* tp_hash */
2432 2438 	0, /* tp_call */
2433 2439 	0, /* tp_str */
2434 2440 	0, /* tp_getattro */
2435 2441 	0, /* tp_setattro */
2436 2442 	0, /* tp_as_buffer */
2437 2443 	Py_TPFLAGS_DEFAULT, /* tp_flags */
2438 2444 	"Iterator over ancestors, implemented in Rust", /* tp_doc */
2439 2445 	0, /* tp_traverse */
2440 2446 	0, /* tp_clear */
2441 2447 	0, /* tp_richcompare */
2442 2448 	0, /* tp_weaklistoffset */
2443 2449 	0, /* tp_iter */
2444 2450 	(iternextfunc)rustla_next, /* tp_iternext */
2445 2451 	0, /* tp_methods */
2446 2452 	0, /* tp_members */
2447 2453 	0, /* tp_getset */
2448 2454 	0, /* tp_base */
2449 2455 	0, /* tp_dict */
2450 2456 	0, /* tp_descr_get */
2451 2457 	0, /* tp_descr_set */
2452 2458 	0, /* tp_dictoffset */
2453 2459 	(initproc)rustla_init, /* tp_init */
2454 2460 	0, /* tp_alloc */
2455 2461 };
2456 2462 #endif /* WITH_RUST */
2457 2463
/* Register the revlog types on the extension module and build the shared
 * null-revision entry.  On PyType_Ready failure we return early; the
 * exception set by PyType_Ready is left for the module loader to see. */
2458 2464 void revlog_module_init(PyObject *mod)
2459 2465 {
2460 2466 	indexType.tp_new = PyType_GenericNew;
2461 2467 	if (PyType_Ready(&indexType) < 0)
2462 2468 	return;
	/* PyModule_AddObject steals a reference, hence the INCREF first */
2463 2469 	Py_INCREF(&indexType);
2464 2470 	PyModule_AddObject(mod, "index", (PyObject *)&indexType);
2465 2471
2466 2472 	nodetreeType.tp_new = PyType_GenericNew;
2467 2473 	if (PyType_Ready(&nodetreeType) < 0)
2468 2474 	return;
2469 2475 	Py_INCREF(&nodetreeType);
2470 2476 	PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);
2471 2477
	/* build the immortal entry tuple for the null revision exactly once;
	 * PY23 selects "s#" vs "y#" for the 20-byte nullid */
2472 2478 	if (!nullentry) {
2473 2479 	nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0,
2474 2480 	-1, -1, -1, -1, nullid, 20);
2475 2481 	}
2476 2482 	if (nullentry)
2477 2483 	PyObject_GC_UnTrack(nullentry);
2478 2484
2479 2485 #ifdef WITH_RUST
2480 2486 	rustlazyancestorsType.tp_new = PyType_GenericNew;
2481 2487 	if (PyType_Ready(&rustlazyancestorsType) < 0)
2482 2488 	return;
2483 2489 	Py_INCREF(&rustlazyancestorsType);
2484 2490 	PyModule_AddObject(mod, "rustlazyancestors",
2485 2491 	(PyObject *)&rustlazyancestorsType);
2486 2492 #endif
2487 2493
2488 2494 }
@@ -1,205 +1,222 b''
1 1 revlog.parseindex must be able to parse the index file even if
2 2 an index entry is split between two 64k blocks. The ideal test
3 3 would be to create an index file with inline data where
4 4 64k < size < 64k + 64 (64k is the size of the read buffer, 64 is
5 5 the size of an index entry) and with an index entry starting right
6 6 before the 64k block boundary, and try to read it.
7 7 We approximate that by reducing the read buffer to 1 byte.
8 8
9 9 $ hg init a
10 10 $ cd a
11 11 $ echo abc > foo
12 12 $ hg add foo
13 13 $ hg commit -m 'add foo'
14 14 $ echo >> foo
15 15 $ hg commit -m 'change foo'
16 16 $ hg log -r 0:
17 17 changeset: 0:7c31755bf9b5
18 18 user: test
19 19 date: Thu Jan 01 00:00:00 1970 +0000
20 20 summary: add foo
21 21
22 22 changeset: 1:26333235a41c
23 23 tag: tip
24 24 user: test
25 25 date: Thu Jan 01 00:00:00 1970 +0000
26 26 summary: change foo
27 27
28 28 $ cat >> test.py << EOF
29 29 > from __future__ import print_function
30 30 > from mercurial import changelog, node, vfs
31 31 >
32 32 > class singlebyteread(object):
33 33 > def __init__(self, real):
34 34 > self.real = real
35 35 >
36 36 > def read(self, size=-1):
37 37 > if size == 65536:
38 38 > size = 1
39 39 > return self.real.read(size)
40 40 >
41 41 > def __getattr__(self, key):
42 42 > return getattr(self.real, key)
43 43 >
44 44 > def __enter__(self):
45 45 > self.real.__enter__()
46 46 > return self
47 47 >
48 48 > def __exit__(self, *args, **kwargs):
49 49 > return self.real.__exit__(*args, **kwargs)
50 50 >
51 51 > def opener(*args):
52 52 > o = vfs.vfs(*args)
53 53 > def wrapper(*a, **kwargs):
54 54 > f = o(*a, **kwargs)
55 55 > return singlebyteread(f)
56 56 > return wrapper
57 57 >
58 58 > cl = changelog.changelog(opener('.hg/store'))
59 59 > print(len(cl), 'revisions:')
60 60 > for r in cl:
61 61 > print(node.short(cl.node(r)))
62 62 > EOF
63 63 $ "$PYTHON" test.py
64 64 2 revisions:
65 65 7c31755bf9b5
66 66 26333235a41c
67 67
68 68 $ cd ..
69 69
70 70 #if no-pure
71 71
72 72 Test SEGV caused by bad revision passed to reachableroots() (issue4775):
73 73
74 74 $ cd a
75 75
76 76 $ "$PYTHON" <<EOF
77 77 > from __future__ import print_function
78 78 > from mercurial import changelog, vfs
79 79 > cl = changelog.changelog(vfs.vfs('.hg/store'))
80 80 > print('good heads:')
81 81 > for head in [0, len(cl) - 1, -1]:
82 82 > print('%s: %r' % (head, cl.reachableroots(0, [head], [0])))
83 83 > print('bad heads:')
84 84 > for head in [len(cl), 10000, -2, -10000, None]:
85 85 > print('%s:' % head, end=' ')
86 86 > try:
87 87 > cl.reachableroots(0, [head], [0])
88 88 > print('uncaught buffer overflow?')
89 89 > except (IndexError, TypeError) as inst:
90 90 > print(inst)
91 91 > print('good roots:')
92 92 > for root in [0, len(cl) - 1, -1]:
93 93 > print('%s: %r' % (root, cl.reachableroots(root, [len(cl) - 1], [root])))
94 94 > print('out-of-range roots are ignored:')
95 95 > for root in [len(cl), 10000, -2, -10000]:
96 96 > print('%s: %r' % (root, cl.reachableroots(root, [len(cl) - 1], [root])))
97 97 > print('bad roots:')
98 98 > for root in [None]:
99 99 > print('%s:' % root, end=' ')
100 100 > try:
101 101 > cl.reachableroots(root, [len(cl) - 1], [root])
102 102 > print('uncaught error?')
103 103 > except TypeError as inst:
104 104 > print(inst)
105 105 > EOF
106 106 good heads:
107 107 0: [0]
108 108 1: [0]
109 109 -1: []
110 110 bad heads:
111 111 2: head out of range
112 112 10000: head out of range
113 113 -2: head out of range
114 114 -10000: head out of range
115 115 None: an integer is required
116 116 good roots:
117 117 0: [0]
118 118 1: [1]
119 119 -1: [-1]
120 120 out-of-range roots are ignored:
121 121 2: []
122 122 10000: []
123 123 -2: []
124 124 -10000: []
125 125 bad roots:
126 126 None: an integer is required
127 127
128 128 $ cd ..
129 129
130 130 Test corrupted p1/p2 fields that could cause SEGV at parsers.c:
131 131
132 132 $ mkdir invalidparent
133 133 $ cd invalidparent
134 134
135 135 $ hg clone --pull -q --config phases.publish=False ../a limit
136 $ hg clone --pull -q --config phases.publish=False ../a neglimit
136 137 $ hg clone --pull -q --config phases.publish=False ../a segv
137 $ rm -R limit/.hg/cache segv/.hg/cache
138 $ rm -R limit/.hg/cache neglimit/.hg/cache segv/.hg/cache
138 139
139 140 $ "$PYTHON" <<EOF
140 141 > data = open("limit/.hg/store/00changelog.i", "rb").read()
141 > for n, p in [(b'limit', b'\0\0\0\x02'), (b'segv', b'\0\x01\0\0')]:
142 > poisons = [
143 > (b'limit', b'\0\0\0\x02'),
144 > (b'neglimit', b'\xff\xff\xff\xfe'),
145 > (b'segv', b'\0\x01\0\0'),
146 > ]
147 > for n, p in poisons:
142 148 > # corrupt p1 at rev0 and p2 at rev1
143 149 > d = data[:24] + p + data[28:127 + 28] + p + data[127 + 32:]
144 150 > open(n + b"/.hg/store/00changelog.i", "wb").write(d)
145 151 > EOF
146 152
147 153 $ hg -R limit debugrevlogindex -f1 -c
148 154 rev flag size link p1 p2 nodeid
149 155 0 0000 62 0 2 -1 7c31755bf9b5
150 156 1 0000 65 1 0 2 26333235a41c
151 157
152 158 $ hg -R limit debugdeltachain -c
153 159 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
154 160 0 1 1 -1 base 63 62 63 1.01613 63 0 0.00000
155 161 1 2 1 -1 base 66 65 66 1.01538 66 0 0.00000
156 162
163 $ hg -R neglimit debugrevlogindex -f1 -c
164 rev flag size link p1 p2 nodeid
165 0 0000 62 0 -2 -1 7c31755bf9b5
166 1 0000 65 1 0 -2 26333235a41c
167
157 168 $ hg -R segv debugrevlogindex -f1 -c
158 169 rev flag size link p1 p2 nodeid
159 170 0 0000 62 0 65536 -1 7c31755bf9b5
160 171 1 0000 65 1 0 65536 26333235a41c
161 172
162 173 $ hg -R segv debugdeltachain -c
163 174 rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
164 175 0 1 1 -1 base 63 62 63 1.01613 63 0 0.00000
165 176 1 2 1 -1 base 66 65 66 1.01538 66 0 0.00000
166 177
167 178 $ cat <<EOF > test.py
168 179 > from __future__ import print_function
169 180 > import sys
170 181 > from mercurial import changelog, vfs
171 182 > cl = changelog.changelog(vfs.vfs(sys.argv[1]))
172 183 > n0, n1 = cl.node(0), cl.node(1)
173 184 > ops = [
174 185 > ('reachableroots',
175 186 > lambda: cl.index.reachableroots2(0, [1], [0], False)),
176 187 > ('compute_phases_map_sets', lambda: cl.computephases([[0], []])),
177 188 > ('index_headrevs', lambda: cl.headrevs()),
178 189 > ('find_gca_candidates', lambda: cl.commonancestorsheads(n0, n1)),
179 190 > ('find_deepest', lambda: cl.ancestor(n0, n1)),
180 191 > ]
181 192 > for l, f in ops:
182 193 > print(l + ':', end=' ')
183 194 > try:
184 195 > f()
185 196 > print('uncaught buffer overflow?')
186 197 > except ValueError as inst:
187 198 > print(inst)
188 199 > EOF
189 200
190 201 $ "$PYTHON" test.py limit/.hg/store
191 202 reachableroots: parent out of range
192 203 compute_phases_map_sets: parent out of range
193 204 index_headrevs: parent out of range
194 205 find_gca_candidates: parent out of range
195 206 find_deepest: parent out of range
207 $ "$PYTHON" test.py neglimit/.hg/store
208 reachableroots: parent out of range
209 compute_phases_map_sets: parent out of range
210 index_headrevs: parent out of range
211 find_gca_candidates: parent out of range
212 find_deepest: parent out of range
196 213 $ "$PYTHON" test.py segv/.hg/store
197 214 reachableroots: parent out of range
198 215 compute_phases_map_sets: parent out of range
199 216 index_headrevs: parent out of range
200 217 find_gca_candidates: parent out of range
201 218 find_deepest: parent out of range
202 219
203 220 $ cd ..
204 221
205 222 #endif
General Comments 0
You need to be logged in to leave comments. Login now