/*
 * Changeset metadata (from the code-review page this file was captured from):
 * "index: handle index[-1] as nullid more explicitly"
 * Author: Martin von Zweigbergk — revision r38883:f3d394ea (default branch).
 */
1 1 /*
2 2 parsers.c - efficient content parsing
3 3
4 4 Copyright 2008 Matt Mackall <mpm@selenic.com> and others
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8 */
9 9
10 10 #include <Python.h>
11 11 #include <assert.h>
12 12 #include <ctype.h>
13 13 #include <stddef.h>
14 14 #include <string.h>
15 15
16 16 #include "bitmanipulation.h"
17 17 #include "charencode.h"
18 18 #include "util.h"
19 19
20 20 #ifdef IS_PY3K
21 21 /* The mapping of Python types is meant to be temporary to get Python
22 22 * 3 to compile. We should remove this once Python 3 support is fully
23 23 * supported and proper types are used in the extensions themselves. */
24 24 #define PyInt_Check PyLong_Check
25 25 #define PyInt_FromLong PyLong_FromLong
26 26 #define PyInt_FromSsize_t PyLong_FromSsize_t
27 27 #define PyInt_AS_LONG PyLong_AS_LONG
28 28 #define PyInt_AsLong PyLong_AsLong
29 29 #endif
30 30
31 31 /*
32 32 * A base-16 trie for fast node->rev mapping.
33 33 *
34 34 * Positive value is index of the next node in the trie
35 35 * Negative value is a leaf: -(rev + 2)
36 36 * Zero is empty
37 37 */
typedef struct {
	/* Each slot: > 0 = index of the child nodetree in the trie array,
	 * < 0 = leaf holding a rev encoded as -(rev + 2), 0 = empty.
	 * (See the trie description in the comment above.) */
	int children[16];
} nodetree;
41 41
42 42 /*
43 43 * This class has two behaviors.
44 44 *
45 45 * When used in a list-like way (with integer keys), we decode an
46 46 * entry in a RevlogNG index file on demand. Our last entry is a
47 47 * sentinel, always a nullid. We have limited support for
48 48 * integer-keyed insert and delete, only at elements right before the
49 49 * sentinel.
50 50 *
51 51 * With string keys, we lazily perform a reverse mapping from node to
52 52 * rev, using a base-16 trie.
53 53 */
typedef struct {
	PyObject_HEAD
	/* Type-specific fields go here. */
	PyObject *data;        /* raw bytes of index */
	Py_buffer buf;         /* buffer of data */
	PyObject **cache;      /* cached tuples, lazily allocated (index_get) */
	const char **offsets;  /* per-rev record offsets; populated on demand
	                        * for inlined indexes (index_deref) */
	Py_ssize_t raw_length; /* original number of elements */
	Py_ssize_t length;     /* current number of elements */
	PyObject *added;       /* list of tuples appended in memory;
	                        * populated on demand (index_insert) */
	PyObject *headrevs;    /* cache, invalidated on changes */
	PyObject *filteredrevs;/* filtered revs set the headrevs cache is keyed on */
	nodetree *nt;          /* base-16 trie for node->rev lookups */
	unsigned ntlength;     /* # nodes in use */
	unsigned ntcapacity;   /* # nodes allocated */
	int ntdepth;           /* maximum depth of tree */
	int ntsplits;          /* # splits performed */
	int ntrev;             /* last rev scanned */
	int ntlookups;         /* # lookups */
	int ntmisses;          /* # lookups that miss the cache */
	int inlined;           /* nonzero for inline revlogs (data interleaved
	                        * with the index) — see index_deref */
} indexObject;
76 76
77 77 static Py_ssize_t index_length(const indexObject *self)
78 78 {
79 79 if (self->added == NULL)
80 80 return self->length;
81 81 return self->length + PyList_GET_SIZE(self->added);
82 82 }
83 83
/* Canonical tuple representing the null revision; shared by every index. */
static PyObject *nullentry;
/* 20 zero bytes: the nullid hash. */
static const char nullid[20];

static Py_ssize_t inline_scan(indexObject *self, const char **offsets);

/* Py_BuildValue format for an index entry: offset/flags as an unsigned
 * 64-bit value ('K' when long is 32-bit, 'k' otherwise), six ints, then
 * the 20-byte node id ('s#'/'y#' per Python 2/3). */
#if LONG_MAX == 0x7fffffffL
static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
#else
static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
#endif

/* A RevlogNG v1 index entry is 64 bytes long. */
static const long v1_hdrsize = 64;
97 97
/*
 * Return a pointer to the beginning of a RevlogNG record.
 *
 * For inlined indexes (data mixed with the index) the per-revision
 * offsets table is built lazily on first access; returns NULL with a
 * MemoryError set if that allocation fails.
 *
 * NOTE(review): the return value of inline_scan is not checked here —
 * presumably offsets is fully populated whenever the allocation
 * succeeded; verify against inline_scan's contract.
 */
static const char *index_deref(indexObject *self, Py_ssize_t pos)
{
	if (self->inlined && pos > 0) {
		if (self->offsets == NULL) {
			self->offsets = PyMem_Malloc(self->raw_length *
					             sizeof(*self->offsets));
			if (self->offsets == NULL)
				return (const char *)PyErr_NoMemory();
			inline_scan(self, self->offsets);
		}
		return self->offsets[pos];
	}

	/* non-inlined: records are fixed-size, so compute directly */
	return (const char *)(self->buf.buf) + pos * v1_hdrsize;
}
116 116
/*
 * Store the two parent revisions of `rev` into ps[0] and ps[1].
 *
 * Returns 0 on success, -1 (ValueError set) if either parent exceeds
 * maxrev. Callers must ensure rev is within range.
 *
 * NOTE(review): the index_deref result is used without a NULL check —
 * presumably callers only pass revs for which deref cannot fail; verify.
 */
static inline int index_get_parents(indexObject *self, Py_ssize_t rev,
				    int *ps, int maxrev)
{
	if (rev >= self->length - 1) {
		/* in-memory entry: parents are tuple items 5 and 6 */
		PyObject *tuple = PyList_GET_ITEM(self->added,
						  rev - self->length + 1);
		ps[0] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 5));
		ps[1] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 6));
	} else {
		/* on-disk entry: parents live at byte offsets 24 and 28 */
		const char *data = index_deref(self, rev);
		ps[0] = getbe32(data + 24);
		ps[1] = getbe32(data + 28);
	}
	/* If index file is corrupted, ps[] may point to invalid revisions. So
	 * there is a risk of buffer overflow to trust them unconditionally. */
	if (ps[0] > maxrev || ps[1] > maxrev) {
		PyErr_SetString(PyExc_ValueError, "parent out of range");
		return -1;
	}
	return 0;
}
138 138
139 139
140 140 /*
141 141 * RevlogNG format (all in big endian, data may be inlined):
142 142 * 6 bytes: offset
143 143 * 2 bytes: flags
144 144 * 4 bytes: compressed length
145 145 * 4 bytes: uncompressed length
146 146 * 4 bytes: base revision
147 147 * 4 bytes: link revision
148 148 * 4 bytes: parent 1 revision
149 149 * 4 bytes: parent 2 revision
150 150 * 32 bytes: nodeid (only 20 bytes used)
151 151 */
152 152 static PyObject *index_get(indexObject *self, Py_ssize_t pos)
153 153 {
154 154 uint64_t offset_flags;
155 155 int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
156 156 const char *c_node_id;
157 157 const char *data;
158 158 Py_ssize_t length = index_length(self);
159 159 PyObject *entry;
160 160
161 if (pos == -1 || pos == length - 1) {
162 Py_INCREF(nullentry);
163 return nullentry;
164 }
165
161 166 if (pos < 0)
162 167 pos += length;
163 168
164 169 if (pos < 0 || pos >= length) {
165 170 PyErr_SetString(PyExc_IndexError, "revlog index out of range");
166 171 return NULL;
167 172 }
168 173
169 if (pos == length - 1) {
170 Py_INCREF(nullentry);
171 return nullentry;
172 }
173
174 174 if (pos >= self->length - 1) {
175 175 PyObject *obj;
176 176 obj = PyList_GET_ITEM(self->added, pos - self->length + 1);
177 177 Py_INCREF(obj);
178 178 return obj;
179 179 }
180 180
181 181 if (self->cache) {
182 182 if (self->cache[pos]) {
183 183 Py_INCREF(self->cache[pos]);
184 184 return self->cache[pos];
185 185 }
186 186 } else {
187 187 self->cache = calloc(self->raw_length, sizeof(PyObject *));
188 188 if (self->cache == NULL)
189 189 return PyErr_NoMemory();
190 190 }
191 191
192 192 data = index_deref(self, pos);
193 193 if (data == NULL)
194 194 return NULL;
195 195
196 196 offset_flags = getbe32(data + 4);
197 197 if (pos == 0) /* mask out version number for the first entry */
198 198 offset_flags &= 0xFFFF;
199 199 else {
200 200 uint32_t offset_high = getbe32(data);
201 201 offset_flags |= ((uint64_t)offset_high) << 32;
202 202 }
203 203
204 204 comp_len = getbe32(data + 8);
205 205 uncomp_len = getbe32(data + 12);
206 206 base_rev = getbe32(data + 16);
207 207 link_rev = getbe32(data + 20);
208 208 parent_1 = getbe32(data + 24);
209 209 parent_2 = getbe32(data + 28);
210 210 c_node_id = data + 32;
211 211
212 212 entry = Py_BuildValue(tuple_format, offset_flags, comp_len,
213 213 uncomp_len, base_rev, link_rev,
214 214 parent_1, parent_2, c_node_id, 20);
215 215
216 216 if (entry) {
217 217 PyObject_GC_UnTrack(entry);
218 218 Py_INCREF(entry);
219 219 }
220 220
221 221 self->cache[pos] = entry;
222 222
223 223 return entry;
224 224 }
225 225
/*
 * Return the 20-byte SHA of the node corresponding to the given rev.
 * Returns NULL (without setting an exception) if pos is out of range.
 */
static const char *index_node(indexObject *self, Py_ssize_t pos)
{
	Py_ssize_t length = index_length(self);
	const char *data;

	/* both -1 and the trailing sentinel slot mean the null revision */
	if (pos == length - 1 || pos == -1)
		return nullid;

	if (pos >= length)
		return NULL;

	if (pos >= self->length - 1) {
		/* in-memory entry: the node is tuple item 7 */
		PyObject *tuple, *str;
		tuple = PyList_GET_ITEM(self->added, pos - self->length + 1);
		str = PyTuple_GetItem(tuple, 7);
		return str ? PyBytes_AS_STRING(str) : NULL;
	}

	/* on-disk entry: the node starts at byte offset 32 */
	data = index_deref(self, pos);
	return data ? data + 32 : NULL;
}
250 250
251 251 /*
252 252 * Return the 20-byte SHA of the node corresponding to the given rev. The
253 253 * rev is assumed to be existing. If not, an exception is set.
254 254 */
255 255 static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
256 256 {
257 257 const char *node = index_node(self, pos);
258 258 if (node == NULL) {
259 259 PyErr_Format(PyExc_IndexError, "could not access rev %d",
260 260 (int)pos);
261 261 }
262 262 return node;
263 263 }
264 264
265 265 static int nt_insert(indexObject *self, const char *node, int rev);
266 266
267 267 static int node_check(PyObject *obj, char **node)
268 268 {
269 269 Py_ssize_t nodelen;
270 270 if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
271 271 return -1;
272 272 if (nodelen == 20)
273 273 return 0;
274 274 PyErr_SetString(PyExc_ValueError, "20-byte hash required");
275 275 return -1;
276 276 }
277 277
/*
 * index.insert(i, entry): append an 8-tuple entry immediately before
 * the nullid sentinel. Only insertion at position -1 (or its positive
 * equivalent) is supported; anything else raises IndexError.
 */
static PyObject *index_insert(indexObject *self, PyObject *args)
{
	PyObject *obj;
	char *node;
	int index;
	Py_ssize_t len;

	if (!PyArg_ParseTuple(args, "iO", &index, &obj))
		return NULL;

	if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
		PyErr_SetString(PyExc_TypeError, "8-tuple required");
		return NULL;
	}

	/* item 7 of the entry tuple is the 20-byte node id */
	if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
		return NULL;

	len = index_length(self);

	if (index < 0)
		index += len;

	if (index != len - 1) {
		PyErr_SetString(PyExc_IndexError,
				"insert only supported at index -1");
		return NULL;
	}

	/* lazily create the in-memory append list */
	if (self->added == NULL) {
		self->added = PyList_New(0);
		if (self->added == NULL)
			return NULL;
	}

	if (PyList_Append(self->added, obj) == -1)
		return NULL;

	/* keep the node->rev trie in sync if it has been built */
	if (self->nt)
		nt_insert(self, node, index);

	/* the head set may have changed; drop the cache */
	Py_CLEAR(self->headrevs);
	Py_RETURN_NONE;
}
322 322
323 323 static void _index_clearcaches(indexObject *self)
324 324 {
325 325 if (self->cache) {
326 326 Py_ssize_t i;
327 327
328 328 for (i = 0; i < self->raw_length; i++)
329 329 Py_CLEAR(self->cache[i]);
330 330 free(self->cache);
331 331 self->cache = NULL;
332 332 }
333 333 if (self->offsets) {
334 334 PyMem_Free(self->offsets);
335 335 self->offsets = NULL;
336 336 }
337 337 free(self->nt);
338 338 self->nt = NULL;
339 339 Py_CLEAR(self->headrevs);
340 340 }
341 341
342 342 static PyObject *index_clearcaches(indexObject *self)
343 343 {
344 344 _index_clearcaches(self);
345 345 self->ntlength = self->ntcapacity = 0;
346 346 self->ntdepth = self->ntsplits = 0;
347 347 self->ntrev = -1;
348 348 self->ntlookups = self->ntmisses = 0;
349 349 Py_RETURN_NONE;
350 350 }
351 351
/*
 * index.stats(): return a dict of index/trie statistics for debugging
 * and introspection. Returns NULL with an exception set on failure.
 */
static PyObject *index_stats(indexObject *self)
{
	PyObject *obj = PyDict_New();
	PyObject *t = NULL;

	if (obj == NULL)
		return NULL;

/* Store self->__n in obj under key __d; jump to bail on any failure
 * (t is either NULL or still owns one reference at that point). */
#define istat(__n, __d) \
	do { \
		t = PyInt_FromSsize_t(self->__n); \
		if (!t) \
			goto bail; \
		if (PyDict_SetItemString(obj, __d, t) == -1) \
			goto bail; \
		Py_DECREF(t); \
	} while (0)

	if (self->added) {
		Py_ssize_t len = PyList_GET_SIZE(self->added);
		t = PyInt_FromSsize_t(len);
		if (!t)
			goto bail;
		if (PyDict_SetItemString(obj, "index entries added", t) == -1)
			goto bail;
		Py_DECREF(t);
	}

	/* only report the on-disk count when it differs from the logical one */
	if (self->raw_length != self->length - 1)
		istat(raw_length, "revs on disk");
	istat(length, "revs in memory");
	istat(ntcapacity, "node trie capacity");
	istat(ntdepth, "node trie depth");
	istat(ntlength, "node trie count");
	istat(ntlookups, "node trie lookups");
	istat(ntmisses, "node trie misses");
	istat(ntrev, "node trie last rev scanned");
	istat(ntsplits, "node trie splits");

#undef istat

	return obj;

bail:
	Py_XDECREF(obj);
	Py_XDECREF(t);
	return NULL;
}
400 400
401 401 /*
402 402 * When we cache a list, we want to be sure the caller can't mutate
403 403 * the cached copy.
404 404 */
405 405 static PyObject *list_copy(PyObject *list)
406 406 {
407 407 Py_ssize_t len = PyList_GET_SIZE(list);
408 408 PyObject *newlist = PyList_New(len);
409 409 Py_ssize_t i;
410 410
411 411 if (newlist == NULL)
412 412 return NULL;
413 413
414 414 for (i = 0; i < len; i++) {
415 415 PyObject *obj = PyList_GET_ITEM(list, i);
416 416 Py_INCREF(obj);
417 417 PyList_SET_ITEM(newlist, i, obj);
418 418 }
419 419
420 420 return newlist;
421 421 }
422 422
423 423 static int check_filter(PyObject *filter, Py_ssize_t arg)
424 424 {
425 425 if (filter) {
426 426 PyObject *arglist, *result;
427 427 int isfiltered;
428 428
429 429 arglist = Py_BuildValue("(n)", arg);
430 430 if (!arglist) {
431 431 return -1;
432 432 }
433 433
434 434 result = PyEval_CallObject(filter, arglist);
435 435 Py_DECREF(arglist);
436 436 if (!result) {
437 437 return -1;
438 438 }
439 439
440 440 /* PyObject_IsTrue returns 1 if true, 0 if false, -1 if error,
441 441 * same as this function, so we can just return it directly.*/
442 442 isfiltered = PyObject_IsTrue(result);
443 443 Py_DECREF(result);
444 444 return isfiltered;
445 445 } else {
446 446 return 0;
447 447 }
448 448 }
449 449
/*
 * Mark every rev in `list` with `marker` in the phases array and return
 * the smallest rev seen (or index_length(self) + 1 when the list is
 * empty). Returns -2 if the iterator could not be created.
 *
 * NOTE(review): list items are read with PyInt_AS_LONG without type or
 * bounds checking before indexing phases[] — presumably callers
 * guarantee in-range integer revs; verify against the caller.
 */
static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
				    Py_ssize_t marker, char *phases)
{
	PyObject *iter = NULL;
	PyObject *iter_item = NULL;
	Py_ssize_t min_idx = index_length(self) + 1;
	long iter_item_long;

	if (PyList_GET_SIZE(list) != 0) {
		iter = PyObject_GetIter(list);
		if (iter == NULL)
			return -2;
		while ((iter_item = PyIter_Next(iter))) {
			iter_item_long = PyInt_AS_LONG(iter_item);
			Py_DECREF(iter_item);
			if (iter_item_long < min_idx)
				min_idx = iter_item_long;
			phases[iter_item_long] = marker;
		}
		Py_DECREF(iter);
	}

	return min_idx;
}
474 474
475 475 static inline void set_phase_from_parents(char *phases, int parent_1,
476 476 int parent_2, Py_ssize_t i)
477 477 {
478 478 if (parent_1 >= 0 && phases[parent_1] > phases[i])
479 479 phases[i] = phases[parent_1];
480 480 if (parent_2 >= 0 && phases[parent_2] > phases[i])
481 481 phases[i] = phases[parent_2];
482 482 }
483 483
/*
 * reachableroots2(minroot, heads, roots, includepath): return the list
 * of roots reachable from heads; when includepath is True, also return
 * every rev lying on a path between a reachable root and a head.
 *
 * All revs are offset by +1 in the state arrays so the null revision
 * (-1) can be tracked at index 0.
 */
static PyObject *reachableroots2(indexObject *self, PyObject *args)
{

	/* Input */
	long minroot;
	PyObject *includepatharg = NULL;
	int includepath = 0;
	/* heads and roots are lists */
	PyObject *heads = NULL;
	PyObject *roots = NULL;
	PyObject *reachable = NULL;

	PyObject *val;
	Py_ssize_t len = index_length(self) - 1; /* exclude the sentinel */
	long revnum;
	Py_ssize_t k;
	Py_ssize_t i;
	Py_ssize_t l;
	int r;
	int parents[2];

	/* Internal data structure:
	 * tovisit: array of length len+1 (all revs + nullrev), filled upto lentovisit
	 * revstates: array of length len+1 (all revs + nullrev) */
	int *tovisit = NULL;
	long lentovisit = 0;
	enum { RS_SEEN = 1, RS_ROOT = 2, RS_REACHABLE = 4 };
	char *revstates = NULL;

	/* Get arguments */
	if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
			      &PyList_Type, &roots,
			      &PyBool_Type, &includepatharg))
		goto bail;

	if (includepatharg == Py_True)
		includepath = 1;

	/* Initialize return set */
	reachable = PyList_New(0);
	if (reachable == NULL)
		goto bail;

	/* Initialize internal datastructures */
	tovisit = (int *)malloc((len + 1) * sizeof(int));
	if (tovisit == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	revstates = (char *)calloc(len + 1, 1);
	if (revstates == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* Mark the roots (shifted +1 so nullrev fits at index 0) */
	l = PyList_GET_SIZE(roots);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(roots, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		/* If root is out of range, e.g. wdir(), it must be unreachable
		 * from heads. So we can just ignore it. */
		if (revnum + 1 < 0 || revnum + 1 >= len + 1)
			continue;
		revstates[revnum + 1] |= RS_ROOT;
	}

	/* Populate tovisit with all the heads */
	l = PyList_GET_SIZE(heads);
	for (i = 0; i < l; i++) {
		revnum = PyInt_AsLong(PyList_GET_ITEM(heads, i));
		if (revnum == -1 && PyErr_Occurred())
			goto bail;
		if (revnum + 1 < 0 || revnum + 1 >= len + 1) {
			PyErr_SetString(PyExc_IndexError, "head out of range");
			goto bail;
		}
		if (!(revstates[revnum + 1] & RS_SEEN)) {
			tovisit[lentovisit++] = (int)revnum;
			revstates[revnum + 1] |= RS_SEEN;
		}
	}

	/* Visit the tovisit list and find the reachable roots */
	k = 0;
	while (k < lentovisit) {
		/* Add the node to reachable if it is a root*/
		revnum = tovisit[k++];
		if (revstates[revnum + 1] & RS_ROOT) {
			revstates[revnum + 1] |= RS_REACHABLE;
			val = PyInt_FromLong(revnum);
			if (val == NULL)
				goto bail;
			r = PyList_Append(reachable, val);
			Py_DECREF(val);
			if (r < 0)
				goto bail;
			/* without includepath there is no need to walk past
			 * a reachable root */
			if (includepath == 0)
				continue;
		}

		/* Add its parents to the list of nodes to visit */
		if (revnum == -1)
			continue;
		r = index_get_parents(self, revnum, parents, (int)len - 1);
		if (r < 0)
			goto bail;
		for (i = 0; i < 2; i++) {
			if (!(revstates[parents[i] + 1] & RS_SEEN)
			    && parents[i] >= minroot) {
				tovisit[lentovisit++] = parents[i];
				revstates[parents[i] + 1] |= RS_SEEN;
			}
		}
	}

	/* Find all the nodes in between the roots we found and the heads
	 * and add them to the reachable set */
	if (includepath == 1) {
		long minidx = minroot;
		if (minidx < 0)
			minidx = 0;
		/* forward sweep: a seen rev is on a path if either parent
		 * is already known reachable */
		for (i = minidx; i < len; i++) {
			if (!(revstates[i + 1] & RS_SEEN))
				continue;
			r = index_get_parents(self, i, parents, (int)len - 1);
			/* Corrupted index file, error is set from
			 * index_get_parents */
			if (r < 0)
				goto bail;
			if (((revstates[parents[0] + 1] |
			      revstates[parents[1] + 1]) & RS_REACHABLE)
			    && !(revstates[i + 1] & RS_REACHABLE)) {
				revstates[i + 1] |= RS_REACHABLE;
				val = PyInt_FromLong(i);
				if (val == NULL)
					goto bail;
				r = PyList_Append(reachable, val);
				Py_DECREF(val);
				if (r < 0)
					goto bail;
			}
		}
	}

	free(revstates);
	free(tovisit);
	return reachable;
bail:
	Py_XDECREF(reachable);
	free(revstates);
	free(tovisit);
	return NULL;
}
639 639
/*
 * compute_phases_map_sets(roots): given a list of per-phase root lists,
 * compute the phase of every rev and return (len, phasesetlist), where
 * phasesetlist[phase] is a set of revs for each non-public phase and
 * slot 0 is None (public revs are derived by difference on the Python
 * side). Returns NULL with an exception set on error.
 */
static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
{
	PyObject *roots = Py_None;
	PyObject *ret = NULL;
	PyObject *phasessize = NULL;
	PyObject *phaseroots = NULL;
	PyObject *phaseset = NULL;
	PyObject *phasessetlist = NULL;
	PyObject *rev = NULL;
	Py_ssize_t len = index_length(self) - 1; /* exclude the sentinel */
	Py_ssize_t numphase = 0;
	Py_ssize_t minrevallphases = 0;
	Py_ssize_t minrevphase = 0;
	Py_ssize_t i = 0;
	char *phases = NULL;
	long phase;

	if (!PyArg_ParseTuple(args, "O", &roots))
		goto done;
	if (roots == NULL || !PyList_Check(roots)) {
		PyErr_SetString(PyExc_TypeError, "roots must be a list");
		goto done;
	}

	phases = calloc(len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
	if (phases == NULL) {
		PyErr_NoMemory();
		goto done;
	}
	/* Put the phase information of all the roots in phases */
	numphase = PyList_GET_SIZE(roots)+1;
	minrevallphases = len + 1;
	phasessetlist = PyList_New(numphase);
	if (phasessetlist == NULL)
		goto done;

	/* slot 0 (public) carries no set */
	PyList_SET_ITEM(phasessetlist, 0, Py_None);
	Py_INCREF(Py_None);

	for (i = 0; i < numphase-1; i++) {
		phaseroots = PyList_GET_ITEM(roots, i);
		phaseset = PySet_New(NULL);
		if (phaseset == NULL)
			goto release;
		/* the list steals the reference to phaseset */
		PyList_SET_ITEM(phasessetlist, i+1, phaseset);
		if (!PyList_Check(phaseroots)) {
			PyErr_SetString(PyExc_TypeError,
					"roots item must be a list");
			goto release;
		}
		minrevphase = add_roots_get_min(self, phaseroots, i+1, phases);
		if (minrevphase == -2) /* Error from add_roots_get_min */
			goto release;
		minrevallphases = MIN(minrevallphases, minrevphase);
	}
	/* Propagate the phase information from the roots to the revs */
	if (minrevallphases != -1) {
		int parents[2];
		for (i = minrevallphases; i < len; i++) {
			if (index_get_parents(self, i, parents,
					      (int)len - 1) < 0)
				goto release;
			set_phase_from_parents(phases, parents[0], parents[1], i);
		}
	}
	/* Transform phase list to a python list */
	phasessize = PyInt_FromLong(len);
	if (phasessize == NULL)
		goto release;
	for (i = 0; i < len; i++) {
		phase = phases[i];
		/* We only store the sets of phase for non public phase, the public phase
		 * is computed as a difference */
		if (phase != 0) {
			phaseset = PyList_GET_ITEM(phasessetlist, phase);
			rev = PyInt_FromLong(i);
			if (rev == NULL)
				goto release;
			PySet_Add(phaseset, rev);
			Py_XDECREF(rev);
		}
	}
	ret = PyTuple_Pack(2, phasessize, phasessetlist);

release:
	Py_XDECREF(phasessize);
	Py_XDECREF(phasessetlist);
done:
	free(phases);
	return ret;
}
731 731
/*
 * index.headrevs(filteredrevs=None): return the list of head revisions,
 * skipping filtered revs. The result is cached in self->headrevs, keyed
 * on the filteredrevs object identity; the cache is returned as a copy
 * so callers cannot mutate it.
 */
static PyObject *index_headrevs(indexObject *self, PyObject *args)
{
	Py_ssize_t i, j, len;
	char *nothead = NULL;
	PyObject *heads = NULL;
	PyObject *filter = NULL;
	PyObject *filteredrevs = Py_None;

	if (!PyArg_ParseTuple(args, "|O", &filteredrevs)) {
		return NULL;
	}

	/* cache hit: same filter object as the previous call */
	if (self->headrevs && filteredrevs == self->filteredrevs)
		return list_copy(self->headrevs);

	Py_DECREF(self->filteredrevs);
	self->filteredrevs = filteredrevs;
	Py_INCREF(filteredrevs);

	if (filteredrevs != Py_None) {
		filter = PyObject_GetAttrString(filteredrevs, "__contains__");
		if (!filter) {
			PyErr_SetString(PyExc_TypeError,
				"filteredrevs has no attribute __contains__");
			goto bail;
		}
	}

	len = index_length(self) - 1; /* exclude the nullid sentinel */
	heads = PyList_New(0);
	if (heads == NULL)
		goto bail;
	if (len == 0) {
		/* empty repo: the null revision is the only head */
		PyObject *nullid = PyInt_FromLong(-1);
		if (nullid == NULL || PyList_Append(heads, nullid) == -1) {
			Py_XDECREF(nullid);
			goto bail;
		}
		goto done;
	}

	nothead = calloc(len, 1);
	if (nothead == NULL) {
		PyErr_NoMemory();
		goto bail;
	}

	/* walk backwards, marking every rev that is a parent of an
	 * unfiltered rev (and hence not a head) */
	for (i = len - 1; i >= 0; i--) {
		int isfiltered;
		int parents[2];

		/* If nothead[i] == 1, it means we've seen an unfiltered child of this
		 * node already, and therefore this node is not filtered. So we can skip
		 * the expensive check_filter step.
		 */
		if (nothead[i] != 1) {
			isfiltered = check_filter(filter, i);
			if (isfiltered == -1) {
				PyErr_SetString(PyExc_TypeError,
					"unable to check filter");
				goto bail;
			}

			if (isfiltered) {
				nothead[i] = 1;
				continue;
			}
		}

		if (index_get_parents(self, i, parents, (int)len - 1) < 0)
			goto bail;
		for (j = 0; j < 2; j++) {
			if (parents[j] >= 0)
				nothead[parents[j]] = 1;
		}
	}

	/* whatever was never marked as someone's parent is a head */
	for (i = 0; i < len; i++) {
		PyObject *head;

		if (nothead[i])
			continue;
		head = PyInt_FromSsize_t(i);
		if (head == NULL || PyList_Append(heads, head) == -1) {
			Py_XDECREF(head);
			goto bail;
		}
	}

done:
	self->headrevs = heads;
	Py_XDECREF(filter);
	free(nothead);
	return list_copy(self->headrevs);
bail:
	Py_XDECREF(filter);
	Py_XDECREF(heads);
	free(nothead);
	return NULL;
}
832 832
833 833 /**
834 834 * Obtain the base revision index entry.
835 835 *
836 836 * Callers must ensure that rev >= 0 or illegal memory access may occur.
837 837 */
838 838 static inline int index_baserev(indexObject *self, int rev)
839 839 {
840 840 const char *data;
841 841
842 842 if (rev >= self->length - 1) {
843 843 PyObject *tuple = PyList_GET_ITEM(self->added,
844 844 rev - self->length + 1);
845 845 return (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 3));
846 846 }
847 847 else {
848 848 data = index_deref(self, rev);
849 849 if (data == NULL) {
850 850 return -2;
851 851 }
852 852
853 853 return getbe32(data + 16);
854 854 }
855 855 }
856 856
857 857 static PyObject *index_deltachain(indexObject *self, PyObject *args)
858 858 {
859 859 int rev, generaldelta;
860 860 PyObject *stoparg;
861 861 int stoprev, iterrev, baserev = -1;
862 862 int stopped;
863 863 PyObject *chain = NULL, *result = NULL;
864 864 const Py_ssize_t length = index_length(self);
865 865
866 866 if (!PyArg_ParseTuple(args, "iOi", &rev, &stoparg, &generaldelta)) {
867 867 return NULL;
868 868 }
869 869
870 870 if (PyInt_Check(stoparg)) {
871 871 stoprev = (int)PyInt_AsLong(stoparg);
872 872 if (stoprev == -1 && PyErr_Occurred()) {
873 873 return NULL;
874 874 }
875 875 }
876 876 else if (stoparg == Py_None) {
877 877 stoprev = -2;
878 878 }
879 879 else {
880 880 PyErr_SetString(PyExc_ValueError,
881 881 "stoprev must be integer or None");
882 882 return NULL;
883 883 }
884 884
885 885 if (rev < 0 || rev >= length - 1) {
886 886 PyErr_SetString(PyExc_ValueError, "revlog index out of range");
887 887 return NULL;
888 888 }
889 889
890 890 chain = PyList_New(0);
891 891 if (chain == NULL) {
892 892 return NULL;
893 893 }
894 894
895 895 baserev = index_baserev(self, rev);
896 896
897 897 /* This should never happen. */
898 898 if (baserev <= -2) {
899 899 /* Error should be set by index_deref() */
900 900 assert(PyErr_Occurred());
901 901 goto bail;
902 902 }
903 903
904 904 iterrev = rev;
905 905
906 906 while (iterrev != baserev && iterrev != stoprev) {
907 907 PyObject *value = PyInt_FromLong(iterrev);
908 908 if (value == NULL) {
909 909 goto bail;
910 910 }
911 911 if (PyList_Append(chain, value)) {
912 912 Py_DECREF(value);
913 913 goto bail;
914 914 }
915 915 Py_DECREF(value);
916 916
917 917 if (generaldelta) {
918 918 iterrev = baserev;
919 919 }
920 920 else {
921 921 iterrev--;
922 922 }
923 923
924 924 if (iterrev < 0) {
925 925 break;
926 926 }
927 927
928 928 if (iterrev >= length - 1) {
929 929 PyErr_SetString(PyExc_IndexError, "revision outside index");
930 930 return NULL;
931 931 }
932 932
933 933 baserev = index_baserev(self, iterrev);
934 934
935 935 /* This should never happen. */
936 936 if (baserev <= -2) {
937 937 /* Error should be set by index_deref() */
938 938 assert(PyErr_Occurred());
939 939 goto bail;
940 940 }
941 941 }
942 942
943 943 if (iterrev == stoprev) {
944 944 stopped = 1;
945 945 }
946 946 else {
947 947 PyObject *value = PyInt_FromLong(iterrev);
948 948 if (value == NULL) {
949 949 goto bail;
950 950 }
951 951 if (PyList_Append(chain, value)) {
952 952 Py_DECREF(value);
953 953 goto bail;
954 954 }
955 955 Py_DECREF(value);
956 956
957 957 stopped = 0;
958 958 }
959 959
960 960 if (PyList_Reverse(chain)) {
961 961 goto bail;
962 962 }
963 963
964 964 result = Py_BuildValue("OO", chain, stopped ? Py_True : Py_False);
965 965 Py_DECREF(chain);
966 966 return result;
967 967
968 968 bail:
969 969 Py_DECREF(chain);
970 970 return NULL;
971 971 }
972 972
973 973 static inline int nt_level(const char *node, Py_ssize_t level)
974 974 {
975 975 int v = node[level>>1];
976 976 if (!(level & 1))
977 977 v >>= 4;
978 978 return v & 0xf;
979 979 }
980 980
/*
 * Look up a node (or node prefix) in the trie.
 *
 * Return values:
 *
 *   -4: match is ambiguous (multiple candidates)
 *   -2: not found
 * rest: valid rev
 */
static int nt_find(indexObject *self, const char *node, Py_ssize_t nodelen,
		   int hex)
{
	/* hex input is decoded nybble-by-nybble via hexdigit */
	int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
	int level, maxlevel, off;

	/* the cheap first-byte test short-circuits most non-null nodes */
	if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
		return -1; /* nullid maps to the null revision */

	/* number of nybbles to compare (full node = 40) */
	if (hex)
		maxlevel = nodelen > 40 ? 40 : (int)nodelen;
	else
		maxlevel = nodelen > 20 ? 40 : ((int)nodelen * 2);

	for (level = off = 0; level < maxlevel; level++) {
		int k = getnybble(node, level);
		nodetree *n = &self->nt[off];
		int v = n->children[k];

		if (v < 0) {
			/* leaf: decode the rev and verify the remaining
			 * prefix nybbles against the stored node */
			const char *n;
			Py_ssize_t i;

			v = -(v + 2);
			n = index_node(self, v);
			if (n == NULL)
				return -2;
			for (i = level; i < maxlevel; i++)
				if (getnybble(node, i) != nt_level(n, i))
					return -2;
			return v;
		}
		if (v == 0)
			return -2; /* empty slot: no such node */
		off = v; /* descend into the child nodetree */
	}
	/* multiple matches against an ambiguous prefix */
	return -4;
}
1027 1027
1028 1028 static int nt_new(indexObject *self)
1029 1029 {
1030 1030 if (self->ntlength == self->ntcapacity) {
1031 1031 if (self->ntcapacity >= INT_MAX / (sizeof(nodetree) * 2)) {
1032 1032 PyErr_SetString(PyExc_MemoryError,
1033 1033 "overflow in nt_new");
1034 1034 return -1;
1035 1035 }
1036 1036 self->ntcapacity *= 2;
1037 1037 self->nt = realloc(self->nt,
1038 1038 self->ntcapacity * sizeof(nodetree));
1039 1039 if (self->nt == NULL) {
1040 1040 PyErr_SetString(PyExc_MemoryError, "out of memory");
1041 1041 return -1;
1042 1042 }
1043 1043 memset(&self->nt[self->ntlength], 0,
1044 1044 sizeof(nodetree) * (self->ntcapacity - self->ntlength));
1045 1045 }
1046 1046 return self->ntlength++;
1047 1047 }
1048 1048
/*
 * Insert the mapping node -> rev into the trie, splitting leaf slots
 * into interior nodes on collision. Returns 0 on success, -1 on error.
 */
static int nt_insert(indexObject *self, const char *node, int rev)
{
	int level = 0;
	int off = 0;

	while (level < 40) {
		int k = nt_level(node, level);
		nodetree *n;
		int v;

		n = &self->nt[off];
		v = n->children[k];

		if (v == 0) {
			/* empty slot: store the leaf, encoded as -(rev+2) */
			n->children[k] = -rev - 2;
			return 0;
		}
		if (v < 0) {
			/* collision with an existing leaf */
			const char *oldnode = index_node_existing(self, -(v + 2));
			int noff;

			if (oldnode == NULL)
				return -1;
			if (!memcmp(oldnode, node, 20)) {
				/* same node: just overwrite the rev */
				n->children[k] = -rev - 2;
				return 0;
			}
			/* different node: split by pushing the old leaf
			 * one level down into a new interior node */
			noff = nt_new(self);
			if (noff == -1)
				return -1;
			/* self->nt may have been changed by realloc */
			self->nt[off].children[k] = noff;
			off = noff;
			n = &self->nt[off];
			n->children[nt_level(oldnode, ++level)] = v;
			if (level > self->ntdepth)
				self->ntdepth = level;
			self->ntsplits += 1;
		} else {
			/* interior node: descend */
			level += 1;
			off = v;
		}
	}

	/* all 40 nybbles consumed without placing the leaf; identical
	 * nodes are handled above, so this is effectively unreachable */
	return -1;
}
1095 1095
/*
 * Remove a node from the trie.  Returns 0 on success, -1 with an
 * exception set on error.
 */
static int nt_delete_node(indexObject *self, const char *node)
{
	/* rev==-2 happens to get encoded as 0, which is interpreted as not set */
	return nt_insert(self, node, -2);
}
1101 1101
/*
 * Lazily allocate and seed the radix tree; a no-op when self->nt
 * already exists.  Initial capacity is sized from the number of
 * on-disk entries.  ntrev records how far bulk population has
 * progressed: revs >= ntrev are not in the trie yet.
 *
 * Returns 0 on success, -1 with an exception set on overflow or
 * allocation failure (self->nt stays NULL in that case).
 */
static int nt_init(indexObject *self)
{
	if (self->nt == NULL) {
		if ((size_t)self->raw_length > INT_MAX / sizeof(nodetree)) {
			PyErr_SetString(PyExc_ValueError, "overflow in nt_init");
			return -1;
		}
		self->ntcapacity = self->raw_length < 4
			? 4 : (int)self->raw_length / 2;

		self->nt = calloc(self->ntcapacity, sizeof(nodetree));
		if (self->nt == NULL) {
			PyErr_NoMemory();
			return -1;
		}
		/* slot 0 is the root; calloc left every child empty */
		self->ntlength = 1;
		self->ntrev = (int)index_length(self) - 1;
		self->ntlookups = 1;
		self->ntmisses = 0;
		/* the nullid sentinel always maps to rev -1 */
		if (nt_insert(self, nullid, -1) == -1) {
			free(self->nt);
			self->nt = NULL;
			return -1;
		}
	}
	return 0;
}
1129 1129
/*
 * Map a binary node (or prefix, nodelen bytes) to a rev, populating
 * the trie lazily as a side effect.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: valid rev
 */
static int index_find_node(indexObject *self,
			   const char *node, Py_ssize_t nodelen)
{
	int rev;

	if (nt_init(self) == -1)
		return -3;

	self->ntlookups++;
	rev = nt_find(self, node, nodelen, 0);
	if (rev >= -1)
		return rev;

	/*
	 * For the first handful of lookups, we scan the entire index,
	 * and cache only the matching nodes. This optimizes for cases
	 * like "hg tip", where only a few nodes are accessed.
	 *
	 * After that, we cache every node we visit, using a single
	 * scan amortized over multiple lookups. This gives the best
	 * bulk performance, e.g. for "hg log".
	 */
	if (self->ntmisses++ < 4) {
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				/* cache only the node we were asked for */
				if (nt_insert(self, n, rev) == -1)
					return -3;
				break;
			}
		}
	} else {
		/* bulk mode: cache every node visited on the way down */
		for (rev = self->ntrev - 1; rev >= 0; rev--) {
			const char *n = index_node_existing(self, rev);
			if (n == NULL)
				return -3;
			if (nt_insert(self, n, rev) == -1) {
				/* remember how far population got */
				self->ntrev = rev + 1;
				return -3;
			}
			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
				break;
			}
		}
		self->ntrev = rev;
	}

	if (rev >= 0)
		return rev;
	return -2;
}
1190 1190
1191 1191 static void raise_revlog_error(void)
1192 1192 {
1193 1193 PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
1194 1194
1195 1195 mod = PyImport_ImportModule("mercurial.error");
1196 1196 if (mod == NULL) {
1197 1197 goto cleanup;
1198 1198 }
1199 1199
1200 1200 dict = PyModule_GetDict(mod);
1201 1201 if (dict == NULL) {
1202 1202 goto cleanup;
1203 1203 }
1204 1204 Py_INCREF(dict);
1205 1205
1206 1206 errclass = PyDict_GetItemString(dict, "RevlogError");
1207 1207 if (errclass == NULL) {
1208 1208 PyErr_SetString(PyExc_SystemError,
1209 1209 "could not find RevlogError");
1210 1210 goto cleanup;
1211 1211 }
1212 1212
1213 1213 /* value of exception is ignored by callers */
1214 1214 PyErr_SetString(errclass, "RevlogError");
1215 1215
1216 1216 cleanup:
1217 1217 Py_XDECREF(dict);
1218 1218 Py_XDECREF(mod);
1219 1219 }
1220 1220
1221 1221 static PyObject *index_getitem(indexObject *self, PyObject *value)
1222 1222 {
1223 1223 char *node;
1224 1224 int rev;
1225 1225
1226 1226 if (PyInt_Check(value))
1227 1227 return index_get(self, PyInt_AS_LONG(value));
1228 1228
1229 1229 if (node_check(value, &node) == -1)
1230 1230 return NULL;
1231 1231 rev = index_find_node(self, node, 20);
1232 1232 if (rev >= -1)
1233 1233 return PyInt_FromLong(rev);
1234 1234 if (rev == -2)
1235 1235 raise_revlog_error();
1236 1236 return NULL;
1237 1237 }
1238 1238
1239 1239 /*
1240 1240 * Fully populate the radix tree.
1241 1241 */
1242 1242 static int nt_populate(indexObject *self) {
1243 1243 int rev;
1244 1244 if (self->ntrev > 0) {
1245 1245 for (rev = self->ntrev - 1; rev >= 0; rev--) {
1246 1246 const char *n = index_node_existing(self, rev);
1247 1247 if (n == NULL)
1248 1248 return -1;
1249 1249 if (nt_insert(self, n, rev) == -1)
1250 1250 return -1;
1251 1251 }
1252 1252 self->ntrev = -1;
1253 1253 }
1254 1254 return 0;
1255 1255 }
1256 1256
1257 1257 static int nt_partialmatch(indexObject *self, const char *node,
1258 1258 Py_ssize_t nodelen)
1259 1259 {
1260 1260 if (nt_init(self) == -1)
1261 1261 return -3;
1262 1262 if (nt_populate(self) == -1)
1263 1263 return -3;
1264 1264
1265 1265 return nt_find(self, node, nodelen, 1);
1266 1266 }
1267 1267
/*
 * Find the length of the shortest unique prefix of node.
 *
 * Return values:
 *
 * -3: error (exception set)
 * -2: not found (no exception set)
 * rest: length of shortest prefix
 */
static int nt_shortest(indexObject *self, const char *node)
{
	int level, off;

	if (nt_init(self) == -1)
		return -3;
	if (nt_populate(self) == -1)
		return -3;

	/* walk the trie one nybble per level; hitting a leaf means the
	 * prefix consumed so far is unique */
	for (level = off = 0; level < 40; level++) {
		int k, v;
		nodetree *n = &self->nt[off];
		k = nt_level(node, level);
		v = n->children[k];
		if (v < 0) {
			/* leaf: verify it is actually our node */
			const char *n;
			v = -(v + 2);
			n = index_node_existing(self, v);
			if (n == NULL)
				return -3;
			if (memcmp(node, n, 20) != 0)
				/*
				 * Found a unique prefix, but it wasn't for the
				 * requested node (i.e the requested node does
				 * not exist).
				 */
				return -2;
			return level + 1;
		}
		if (v == 0)
			return -2;
		/* interior node: descend */
		off = v;
	}
	/*
	 * The node was still not unique after 40 hex digits, so this won't
	 * happen. Also, if we get here, then there's a programming error in
	 * this file that made us insert a node longer than 40 hex digits.
	 */
	PyErr_SetString(PyExc_Exception, "broken node tree");
	return -3;
}
1318 1318
/*
 * .partialmatch(hexprefix) -> 20-byte node of the unique match.
 * Returns None when there is no match (or the input is not hex),
 * nullid's node for a prefix of nullid, and raises RevlogError for an
 * ambiguous prefix or ValueError for a bad key length.
 */
static PyObject *index_partialmatch(indexObject *self, PyObject *args)
{
	const char *fullnode;
	int nodelen;
	char *node;
	int rev, i;

	if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
		return NULL;

	if (nodelen < 1) {
		PyErr_SetString(PyExc_ValueError, "key too short");
		return NULL;
	}

	if (nodelen > 40) {
		PyErr_SetString(PyExc_ValueError, "key too long");
		return NULL;
	}

	/* hexdigit() sets an exception on a bad character */
	for (i = 0; i < nodelen; i++)
		hexdigit(node, i);
	if (PyErr_Occurred()) {
		/* input contains non-hex characters */
		PyErr_Clear();
		Py_RETURN_NONE;
	}

	rev = nt_partialmatch(self, node, nodelen);

	switch (rev) {
	case -4:
		/* ambiguous prefix: raise, then return NULL below */
		raise_revlog_error();
		/* fall through */
	case -3:
		return NULL;
	case -2:
		Py_RETURN_NONE;
	case -1:
		return PyBytes_FromStringAndSize(nullid, 20);
	}

	fullnode = index_node_existing(self, rev);
	if (fullnode == NULL) {
		return NULL;
	}
	return PyBytes_FromStringAndSize(fullnode, 20);
}
1366 1366
1367 1367 static PyObject *index_shortest(indexObject *self, PyObject *args)
1368 1368 {
1369 1369 PyObject *val;
1370 1370 char *node;
1371 1371 int length;
1372 1372
1373 1373 if (!PyArg_ParseTuple(args, "O", &val))
1374 1374 return NULL;
1375 1375 if (node_check(val, &node) == -1)
1376 1376 return NULL;
1377 1377
1378 1378 self->ntlookups++;
1379 1379 length = nt_shortest(self, node);
1380 1380 if (length == -3)
1381 1381 return NULL;
1382 1382 if (length == -2) {
1383 1383 raise_revlog_error();
1384 1384 return NULL;
1385 1385 }
1386 1386 return PyInt_FromLong(length);
1387 1387 }
1388 1388
1389 1389 static PyObject *index_m_get(indexObject *self, PyObject *args)
1390 1390 {
1391 1391 PyObject *val;
1392 1392 char *node;
1393 1393 int rev;
1394 1394
1395 1395 if (!PyArg_ParseTuple(args, "O", &val))
1396 1396 return NULL;
1397 1397 if (node_check(val, &node) == -1)
1398 1398 return NULL;
1399 1399 rev = index_find_node(self, node, 20);
1400 1400 if (rev == -3)
1401 1401 return NULL;
1402 1402 if (rev == -2)
1403 1403 Py_RETURN_NONE;
1404 1404 return PyInt_FromLong(rev);
1405 1405 }
1406 1406
1407 1407 static int index_contains(indexObject *self, PyObject *value)
1408 1408 {
1409 1409 char *node;
1410 1410
1411 1411 if (PyInt_Check(value)) {
1412 1412 long rev = PyInt_AS_LONG(value);
1413 1413 return rev >= -1 && rev < index_length(self);
1414 1414 }
1415 1415
1416 1416 if (node_check(value, &node) == -1)
1417 1417 return -1;
1418 1418
1419 1419 switch (index_find_node(self, node, 20)) {
1420 1420 case -3:
1421 1421 return -1;
1422 1422 case -2:
1423 1423 return 0;
1424 1424 default:
1425 1425 return 1;
1426 1426 }
1427 1427 }
1428 1428
1429 1429 typedef uint64_t bitmask;
1430 1430
1431 1431 /*
1432 1432 * Given a disjoint set of revs, return all candidates for the
1433 1433 * greatest common ancestor. In revset notation, this is the set
1434 1434 * "heads(::a and ::b and ...)"
1435 1435 */
1436 1436 static PyObject *find_gca_candidates(indexObject *self, const int *revs,
1437 1437 int revcount)
1438 1438 {
1439 1439 const bitmask allseen = (1ull << revcount) - 1;
1440 1440 const bitmask poison = 1ull << revcount;
1441 1441 PyObject *gca = PyList_New(0);
1442 1442 int i, v, interesting;
1443 1443 int maxrev = -1;
1444 1444 bitmask sp;
1445 1445 bitmask *seen;
1446 1446
1447 1447 if (gca == NULL)
1448 1448 return PyErr_NoMemory();
1449 1449
1450 1450 for (i = 0; i < revcount; i++) {
1451 1451 if (revs[i] > maxrev)
1452 1452 maxrev = revs[i];
1453 1453 }
1454 1454
1455 1455 seen = calloc(sizeof(*seen), maxrev + 1);
1456 1456 if (seen == NULL) {
1457 1457 Py_DECREF(gca);
1458 1458 return PyErr_NoMemory();
1459 1459 }
1460 1460
1461 1461 for (i = 0; i < revcount; i++)
1462 1462 seen[revs[i]] = 1ull << i;
1463 1463
1464 1464 interesting = revcount;
1465 1465
1466 1466 for (v = maxrev; v >= 0 && interesting; v--) {
1467 1467 bitmask sv = seen[v];
1468 1468 int parents[2];
1469 1469
1470 1470 if (!sv)
1471 1471 continue;
1472 1472
1473 1473 if (sv < poison) {
1474 1474 interesting -= 1;
1475 1475 if (sv == allseen) {
1476 1476 PyObject *obj = PyInt_FromLong(v);
1477 1477 if (obj == NULL)
1478 1478 goto bail;
1479 1479 if (PyList_Append(gca, obj) == -1) {
1480 1480 Py_DECREF(obj);
1481 1481 goto bail;
1482 1482 }
1483 1483 sv |= poison;
1484 1484 for (i = 0; i < revcount; i++) {
1485 1485 if (revs[i] == v)
1486 1486 goto done;
1487 1487 }
1488 1488 }
1489 1489 }
1490 1490 if (index_get_parents(self, v, parents, maxrev) < 0)
1491 1491 goto bail;
1492 1492
1493 1493 for (i = 0; i < 2; i++) {
1494 1494 int p = parents[i];
1495 1495 if (p == -1)
1496 1496 continue;
1497 1497 sp = seen[p];
1498 1498 if (sv < poison) {
1499 1499 if (sp == 0) {
1500 1500 seen[p] = sv;
1501 1501 interesting++;
1502 1502 }
1503 1503 else if (sp != sv)
1504 1504 seen[p] |= sv;
1505 1505 } else {
1506 1506 if (sp && sp < poison)
1507 1507 interesting--;
1508 1508 seen[p] = sv;
1509 1509 }
1510 1510 }
1511 1511 }
1512 1512
1513 1513 done:
1514 1514 free(seen);
1515 1515 return gca;
1516 1516 bail:
1517 1517 free(seen);
1518 1518 Py_XDECREF(gca);
1519 1519 return NULL;
1520 1520 }
1521 1521
1522 1522 /*
1523 1523 * Given a disjoint set of revs, return the subset with the longest
1524 1524 * path to the root.
1525 1525 */
1526 1526 static PyObject *find_deepest(indexObject *self, PyObject *revs)
1527 1527 {
1528 1528 const Py_ssize_t revcount = PyList_GET_SIZE(revs);
1529 1529 static const Py_ssize_t capacity = 24;
1530 1530 int *depth, *interesting = NULL;
1531 1531 int i, j, v, ninteresting;
1532 1532 PyObject *dict = NULL, *keys = NULL;
1533 1533 long *seen = NULL;
1534 1534 int maxrev = -1;
1535 1535 long final;
1536 1536
1537 1537 if (revcount > capacity) {
1538 1538 PyErr_Format(PyExc_OverflowError,
1539 1539 "bitset size (%ld) > capacity (%ld)",
1540 1540 (long)revcount, (long)capacity);
1541 1541 return NULL;
1542 1542 }
1543 1543
1544 1544 for (i = 0; i < revcount; i++) {
1545 1545 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
1546 1546 if (n > maxrev)
1547 1547 maxrev = n;
1548 1548 }
1549 1549
1550 1550 depth = calloc(sizeof(*depth), maxrev + 1);
1551 1551 if (depth == NULL)
1552 1552 return PyErr_NoMemory();
1553 1553
1554 1554 seen = calloc(sizeof(*seen), maxrev + 1);
1555 1555 if (seen == NULL) {
1556 1556 PyErr_NoMemory();
1557 1557 goto bail;
1558 1558 }
1559 1559
1560 1560 interesting = calloc(sizeof(*interesting), 1 << revcount);
1561 1561 if (interesting == NULL) {
1562 1562 PyErr_NoMemory();
1563 1563 goto bail;
1564 1564 }
1565 1565
1566 1566 if (PyList_Sort(revs) == -1)
1567 1567 goto bail;
1568 1568
1569 1569 for (i = 0; i < revcount; i++) {
1570 1570 int n = (int)PyInt_AsLong(PyList_GET_ITEM(revs, i));
1571 1571 long b = 1l << i;
1572 1572 depth[n] = 1;
1573 1573 seen[n] = b;
1574 1574 interesting[b] = 1;
1575 1575 }
1576 1576
1577 1577 /* invariant: ninteresting is the number of non-zero entries in
1578 1578 * interesting. */
1579 1579 ninteresting = (int)revcount;
1580 1580
1581 1581 for (v = maxrev; v >= 0 && ninteresting > 1; v--) {
1582 1582 int dv = depth[v];
1583 1583 int parents[2];
1584 1584 long sv;
1585 1585
1586 1586 if (dv == 0)
1587 1587 continue;
1588 1588
1589 1589 sv = seen[v];
1590 1590 if (index_get_parents(self, v, parents, maxrev) < 0)
1591 1591 goto bail;
1592 1592
1593 1593 for (i = 0; i < 2; i++) {
1594 1594 int p = parents[i];
1595 1595 long sp;
1596 1596 int dp;
1597 1597
1598 1598 if (p == -1)
1599 1599 continue;
1600 1600
1601 1601 dp = depth[p];
1602 1602 sp = seen[p];
1603 1603 if (dp <= dv) {
1604 1604 depth[p] = dv + 1;
1605 1605 if (sp != sv) {
1606 1606 interesting[sv] += 1;
1607 1607 seen[p] = sv;
1608 1608 if (sp) {
1609 1609 interesting[sp] -= 1;
1610 1610 if (interesting[sp] == 0)
1611 1611 ninteresting -= 1;
1612 1612 }
1613 1613 }
1614 1614 }
1615 1615 else if (dv == dp - 1) {
1616 1616 long nsp = sp | sv;
1617 1617 if (nsp == sp)
1618 1618 continue;
1619 1619 seen[p] = nsp;
1620 1620 interesting[sp] -= 1;
1621 1621 if (interesting[sp] == 0)
1622 1622 ninteresting -= 1;
1623 1623 if (interesting[nsp] == 0)
1624 1624 ninteresting += 1;
1625 1625 interesting[nsp] += 1;
1626 1626 }
1627 1627 }
1628 1628 interesting[sv] -= 1;
1629 1629 if (interesting[sv] == 0)
1630 1630 ninteresting -= 1;
1631 1631 }
1632 1632
1633 1633 final = 0;
1634 1634 j = ninteresting;
1635 1635 for (i = 0; i < (int)(2 << revcount) && j > 0; i++) {
1636 1636 if (interesting[i] == 0)
1637 1637 continue;
1638 1638 final |= i;
1639 1639 j -= 1;
1640 1640 }
1641 1641 if (final == 0) {
1642 1642 keys = PyList_New(0);
1643 1643 goto bail;
1644 1644 }
1645 1645
1646 1646 dict = PyDict_New();
1647 1647 if (dict == NULL)
1648 1648 goto bail;
1649 1649
1650 1650 for (i = 0; i < revcount; i++) {
1651 1651 PyObject *key;
1652 1652
1653 1653 if ((final & (1 << i)) == 0)
1654 1654 continue;
1655 1655
1656 1656 key = PyList_GET_ITEM(revs, i);
1657 1657 Py_INCREF(key);
1658 1658 Py_INCREF(Py_None);
1659 1659 if (PyDict_SetItem(dict, key, Py_None) == -1) {
1660 1660 Py_DECREF(key);
1661 1661 Py_DECREF(Py_None);
1662 1662 goto bail;
1663 1663 }
1664 1664 }
1665 1665
1666 1666 keys = PyDict_Keys(dict);
1667 1667
1668 1668 bail:
1669 1669 free(depth);
1670 1670 free(seen);
1671 1671 free(interesting);
1672 1672 Py_XDECREF(dict);
1673 1673
1674 1674 return keys;
1675 1675 }
1676 1676
/*
 * Given a (possibly overlapping) set of revs, return all the
 * common ancestors heads: heads(::args[0] and ::a[1] and ...)
 *
 * Arguments are deduplicated (a cheap bloom filter skips the exact
 * scan in the disjoint case) and validated; a nullrev argument short
 * circuits to an empty result.
 */
static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
{
	PyObject *ret = NULL;
	Py_ssize_t argcount, i, len;
	bitmask repeat = 0;
	int revcount = 0;
	int *revs;

	argcount = PySequence_Length(args);
	/* NOTE(review): argcount can be -1 on error; the guard below
	 * then skips the NULL check and we fall through to an empty
	 * result — confirm this masking is intended. */
	revs = PyMem_Malloc(argcount * sizeof(*revs));
	if (argcount > 0 && revs == NULL)
		return PyErr_NoMemory();
	len = index_length(self) - 1;

	for (i = 0; i < argcount; i++) {
		static const int capacity = 24;
		PyObject *obj = PySequence_GetItem(args, i);
		bitmask x;
		long val;

		if (!PyInt_Check(obj)) {
			PyErr_SetString(PyExc_TypeError,
					"arguments must all be ints");
			Py_DECREF(obj);
			goto bail;
		}
		val = PyInt_AsLong(obj);
		Py_DECREF(obj);
		if (val == -1) {
			/* nullrev: ancestors are trivially empty */
			ret = PyList_New(0);
			goto done;
		}
		if (val < 0 || val >= len) {
			PyErr_SetString(PyExc_IndexError,
					"index out of range");
			goto bail;
		}
		/* this cheesy bloom filter lets us avoid some more
		 * expensive duplicate checks in the common set-is-disjoint
		 * case */
		x = 1ull << (val & 0x3f);
		if (repeat & x) {
			int k;
			for (k = 0; k < revcount; k++) {
				if (val == revs[k])
					goto duplicate;
			}
		}
		else repeat |= x;
		if (revcount >= capacity) {
			PyErr_Format(PyExc_OverflowError,
				     "bitset size (%d) > capacity (%d)",
				     revcount, capacity);
			goto bail;
		}
		revs[revcount++] = (int)val;
	duplicate:;
	}

	if (revcount == 0) {
		ret = PyList_New(0);
		goto done;
	}
	if (revcount == 1) {
		/* single rev: it is its own common-ancestor head */
		PyObject *obj;
		ret = PyList_New(1);
		if (ret == NULL)
			goto bail;
		obj = PyInt_FromLong(revs[0]);
		if (obj == NULL)
			goto bail;
		PyList_SET_ITEM(ret, 0, obj);
		goto done;
	}

	ret = find_gca_candidates(self, revs, revcount);
	if (ret == NULL)
		goto bail;

done:
	PyMem_Free(revs);
	return ret;

bail:
	PyMem_Free(revs);
	Py_XDECREF(ret);
	return NULL;
}
1769 1769
1770 1770 /*
1771 1771 * Given a (possibly overlapping) set of revs, return the greatest
1772 1772 * common ancestors: those with the longest path to the root.
1773 1773 */
1774 1774 static PyObject *index_ancestors(indexObject *self, PyObject *args)
1775 1775 {
1776 1776 PyObject *ret;
1777 1777 PyObject *gca = index_commonancestorsheads(self, args);
1778 1778 if (gca == NULL)
1779 1779 return NULL;
1780 1780
1781 1781 if (PyList_GET_SIZE(gca) <= 1) {
1782 1782 return gca;
1783 1783 }
1784 1784
1785 1785 ret = find_deepest(self, gca);
1786 1786 Py_DECREF(gca);
1787 1787 return ret;
1788 1788 }
1789 1789
1790 1790 /*
1791 1791 * Invalidate any trie entries introduced by added revs.
1792 1792 */
1793 1793 static void nt_invalidate_added(indexObject *self, Py_ssize_t start)
1794 1794 {
1795 1795 Py_ssize_t i, len = PyList_GET_SIZE(self->added);
1796 1796
1797 1797 for (i = start; i < len; i++) {
1798 1798 PyObject *tuple = PyList_GET_ITEM(self->added, i);
1799 1799 PyObject *node = PyTuple_GET_ITEM(tuple, 7);
1800 1800
1801 1801 nt_delete_node(self, PyBytes_AS_STRING(node));
1802 1802 }
1803 1803
1804 1804 if (start == 0)
1805 1805 Py_CLEAR(self->added);
1806 1806 }
1807 1807
/*
 * Delete a numeric range of revs, which must be at the end of the
 * range, but exclude the sentinel nullid entry.
 *
 * Only "del index[start:]" shapes are accepted (step 1, stop at the
 * sentinel).  Trie entries, caches and the added-list are trimmed to
 * match.  Returns 0 on success, -1 with an exception set on error.
 */
static int index_slice_del(indexObject *self, PyObject *item)
{
	Py_ssize_t start, stop, step, slicelength;
	Py_ssize_t length = index_length(self);
	int ret = 0;

/* Argument changed from PySliceObject* to PyObject* in Python 3. */
#ifdef IS_PY3K
	if (PySlice_GetIndicesEx(item, length,
#else
	if (PySlice_GetIndicesEx((PySliceObject*)item, length,
#endif
				 &start, &stop, &step, &slicelength) < 0)
		return -1;

	if (slicelength <= 0)
		return 0;

	if ((step < 0 && start < stop) || (step > 0 && start > stop))
		stop = start;

	/* normalize a reversed slice into a forward one */
	if (step < 0) {
		stop = start + 1;
		start = stop + step*(slicelength - 1) - 1;
		step = -step;
	}

	if (step != 1) {
		PyErr_SetString(PyExc_ValueError,
				"revlog index delete requires step size of 1");
		return -1;
	}

	/* deletion must run all the way to the sentinel */
	if (stop != length - 1) {
		PyErr_SetString(PyExc_IndexError,
				"revlog index deletion indices are invalid");
		return -1;
	}

	if (start < self->length - 1) {
		/* deletion reaches into the on-disk portion */
		if (self->nt) {
			Py_ssize_t i;

			/* NOTE(review): this purge starts at start + 1,
			 * so rev "start" (the first deleted rev) keeps
			 * its trie entry; nt_find re-verifies leaves
			 * against the index, but confirm stale entries
			 * cannot confuse later nt_insert splits. */
			for (i = start + 1; i < self->length - 1; i++) {
				const char *node = index_node_existing(self, i);
				if (node == NULL)
					return -1;

				nt_delete_node(self, node);
			}
			if (self->added)
				nt_invalidate_added(self, 0);
			if (self->ntrev > start)
				self->ntrev = (int)start;
		}
		self->length = start + 1;
		if (start < self->raw_length) {
			/* drop cached entry tuples for deleted revs */
			if (self->cache) {
				Py_ssize_t i;
				for (i = start; i < self->raw_length; i++)
					Py_CLEAR(self->cache[i]);
			}
			self->raw_length = start;
		}
		goto done;
	}

	/* deletion only touches in-memory added entries */
	if (self->nt) {
		nt_invalidate_added(self, start - self->length + 1);
		if (self->ntrev > start)
			self->ntrev = (int)start;
	}
	if (self->added)
		ret = PyList_SetSlice(self->added, start - self->length + 1,
				      PyList_GET_SIZE(self->added), NULL);
done:
	Py_CLEAR(self->headrevs);
	return ret;
}
1891 1891
/*
 * Supported ops:
 *
 * slice deletion
 * string assignment (extend node->rev mapping)
 * string deletion (shrink node->rev mapping)
 *
 * Returns 0 on success, -1 with an exception set on error.
 */
static int index_assign_subscript(indexObject *self, PyObject *item,
				  PyObject *value)
{
	char *node;
	long rev;

	if (PySlice_Check(item) && value == NULL)
		return index_slice_del(self, item);

	if (node_check(item, &node) == -1)
		return -1;

	if (value == NULL)
		/* no trie yet means nothing to delete */
		return self->nt ? nt_delete_node(self, node) : 0;
	rev = PyInt_AsLong(value);
	/* rev < 0 also covers the -1 error return of PyInt_AsLong;
	 * PyErr_Occurred() distinguishes the two cases */
	if (rev > INT_MAX || rev < 0) {
		if (!PyErr_Occurred())
			PyErr_SetString(PyExc_ValueError, "rev out of range");
		return -1;
	}

	if (nt_init(self) == -1)
		return -1;
	return nt_insert(self, node, (int)rev);
}
1924 1924
/*
 * Find all RevlogNG entries in an index that has inline data. Update
 * the optional "offsets" table with those entries.
 *
 * Returns the number of entries found, or -1 with an exception set
 * when the walk does not land exactly on the end of the buffer.
 * The caller must size "offsets" for at least that many entries.
 */
static Py_ssize_t inline_scan(indexObject *self, const char **offsets)
{
	const char *data = (const char *)self->buf.buf;
	Py_ssize_t pos = 0;
	Py_ssize_t end = self->buf.len;
	long incr = v1_hdrsize;
	Py_ssize_t len = 0;

	/* pos >= 0 guards against overflow from a corrupt comp_len */
	while (pos + v1_hdrsize <= end && pos >= 0) {
		uint32_t comp_len;
		/* 3rd element of header is length of compressed inline data */
		comp_len = getbe32(data + pos + 8);
		incr = v1_hdrsize + comp_len;
		if (offsets)
			offsets[len] = data + pos;
		len++;
		pos += incr;
	}

	if (pos != end) {
		if (!PyErr_Occurred())
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
		return -1;
	}

	return len;
}
1956 1956
/*
 * __init__(data, inlined): bind the index to a buffer of RevlogNG
 * records.  With inline data, entries are located by scanning;
 * otherwise the buffer must be a whole number of fixed-size records.
 * self->length includes the trailing nullid sentinel (raw_length + 1).
 *
 * Returns 0 on success, -1 with an exception set on error; cleanup of
 * partially acquired resources is left to index_dealloc.
 */
static int index_init(indexObject *self, PyObject *args)
{
	PyObject *data_obj, *inlined_obj;
	Py_ssize_t size;

	/* Initialize before argument-checking to avoid index_dealloc() crash. */
	self->raw_length = 0;
	self->added = NULL;
	self->cache = NULL;
	self->data = NULL;
	memset(&self->buf, 0, sizeof(self->buf));
	self->headrevs = NULL;
	self->filteredrevs = Py_None;
	Py_INCREF(Py_None);
	self->nt = NULL;
	self->offsets = NULL;

	if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
		return -1;
	if (!PyObject_CheckBuffer(data_obj)) {
		PyErr_SetString(PyExc_TypeError,
				"data does not support buffer interface");
		return -1;
	}

	if (PyObject_GetBuffer(data_obj, &self->buf, PyBUF_SIMPLE) == -1)
		return -1;
	size = self->buf.len;

	self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
	self->data = data_obj;

	self->ntlength = self->ntcapacity = 0;
	self->ntdepth = self->ntsplits = 0;
	self->ntlookups = self->ntmisses = 0;
	self->ntrev = -1;
	Py_INCREF(self->data);

	if (self->inlined) {
		Py_ssize_t len = inline_scan(self, NULL);
		if (len == -1)
			goto bail;
		self->raw_length = len;
		self->length = len + 1;
	} else {
		if (size % v1_hdrsize) {
			PyErr_SetString(PyExc_ValueError, "corrupt index file");
			goto bail;
		}
		self->raw_length = size / v1_hdrsize;
		self->length = self->raw_length + 1;
	}

	return 0;
bail:
	return -1;
}
2014 2014
/*
 * The index doubles as its own nodemap: node->rev lookups go through
 * index_getitem, so .nodemap just returns a new reference to self.
 */
static PyObject *index_nodemap(indexObject *self)
{
	Py_INCREF(self);
	return (PyObject *)self;
}
2020 2020
/*
 * Release caches, the underlying buffer view, and all owned
 * references before freeing the object itself.
 */
static void index_dealloc(indexObject *self)
{
	_index_clearcaches(self);
	Py_XDECREF(self->filteredrevs);
	if (self->buf.buf) {
		PyBuffer_Release(&self->buf);
		memset(&self->buf, 0, sizeof(self->buf));
	}
	Py_XDECREF(self->data);
	Py_XDECREF(self->added);
	PyObject_Del(self);
}
2033 2033
/* Sequence protocol: len(index), index[rev] and "node in index". */
static PySequenceMethods index_sequence_methods = {
	(lenfunc)index_length,      /* sq_length */
	0,                          /* sq_concat */
	0,                          /* sq_repeat */
	(ssizeargfunc)index_get,    /* sq_item */
	0,                          /* sq_slice */
	0,                          /* sq_ass_item */
	0,                          /* sq_ass_slice */
	(objobjproc)index_contains, /* sq_contains */
};
2044 2044
/* Mapping protocol: node keys and slice deletion (see
 * index_assign_subscript for the supported operations). */
static PyMappingMethods index_mapping_methods = {
	(lenfunc)index_length,                 /* mp_length */
	(binaryfunc)index_getitem,             /* mp_subscript */
	(objobjargproc)index_assign_subscript, /* mp_ass_subscript */
};
2050 2050
/* Methods exposed on parsers.index instances. */
static PyMethodDef index_methods[] = {
	{"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
	 "return the gca set of the given revs"},
	{"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
	  METH_VARARGS,
	  "return the heads of the common ancestors of the given revs"},
	{"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
	 "clear the index caches"},
	{"get", (PyCFunction)index_m_get, METH_VARARGS,
	 "get an index entry"},
	{"computephasesmapsets", (PyCFunction)compute_phases_map_sets,
	 METH_VARARGS, "compute phases"},
	{"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
	 "reachableroots"},
	{"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
	 "get head revisions"}, /* Can do filtering since 3.2 */
	{"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
	 "get filtered head revisions"}, /* Can always do filtering */
	{"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
	 "determine revisions with deltas to reconstruct fulltext"},
	{"insert", (PyCFunction)index_insert, METH_VARARGS,
	 "insert an index entry"},
	{"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
	 "match a potentially ambiguous node ID"},
	{"shortest", (PyCFunction)index_shortest, METH_VARARGS,
	 "find length of shortest hex nodeid of a binary ID"},
	{"stats", (PyCFunction)index_stats, METH_NOARGS,
	 "stats for the index"},
	{NULL} /* Sentinel */
};
2081 2081
/* Attribute descriptors: .nodemap returns the index itself. */
static PyGetSetDef index_getset[] = {
	{"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
	{NULL} /* Sentinel */
};
2086 2086
/* Type object for parsers.index; tp_new is filled in by
 * revlog_module_init(). */
static PyTypeObject indexType = {
	PyVarObject_HEAD_INIT(NULL, 0) /* header */
	"parsers.index",           /* tp_name */
	sizeof(indexObject),       /* tp_basicsize */
	0,                         /* tp_itemsize */
	(destructor)index_dealloc, /* tp_dealloc */
	0,                         /* tp_print */
	0,                         /* tp_getattr */
	0,                         /* tp_setattr */
	0,                         /* tp_compare */
	0,                         /* tp_repr */
	0,                         /* tp_as_number */
	&index_sequence_methods,   /* tp_as_sequence */
	&index_mapping_methods,    /* tp_as_mapping */
	0,                         /* tp_hash */
	0,                         /* tp_call */
	0,                         /* tp_str */
	0,                         /* tp_getattro */
	0,                         /* tp_setattro */
	0,                         /* tp_as_buffer */
	Py_TPFLAGS_DEFAULT,        /* tp_flags */
	"revlog index",            /* tp_doc */
	0,                         /* tp_traverse */
	0,                         /* tp_clear */
	0,                         /* tp_richcompare */
	0,                         /* tp_weaklistoffset */
	0,                         /* tp_iter */
	0,                         /* tp_iternext */
	index_methods,             /* tp_methods */
	0,                         /* tp_members */
	index_getset,              /* tp_getset */
	0,                         /* tp_base */
	0,                         /* tp_dict */
	0,                         /* tp_descr_get */
	0,                         /* tp_descr_set */
	0,                         /* tp_dictoffset */
	(initproc)index_init,      /* tp_init */
	0,                         /* tp_alloc */
};
2126 2126
/*
 * returns a tuple of the form (index, index, cache) with elements as
 * follows:
 *
 * index: an index object that lazily parses RevlogNG records
 * cache: if data is inlined, a tuple (0, index_file_content), else None
 *	  index_file_content could be a string, or a buffer
 *
 * added complications are for backwards compatibility
 */
PyObject *parse_index2(PyObject *self, PyObject *args)
{
	PyObject *tuple = NULL, *cache = NULL;
	indexObject *idx;
	int ret;

	idx = PyObject_New(indexObject, &indexType);
	if (idx == NULL)
		goto bail;

	ret = index_init(idx, args);
	if (ret == -1)
		goto bail;

	if (idx->inlined) {
		cache = Py_BuildValue("iO", 0, idx->data);
		if (cache == NULL)
			goto bail;
	} else {
		cache = Py_None;
		Py_INCREF(cache);
	}

	/* "NN" steals both references on success.  NOTE(review): if
	 * Py_BuildValue itself fails, the bail path decrefs idx and
	 * cache again — confirm against CPython's documented behavior
	 * for "N" on failure. */
	tuple = Py_BuildValue("NN", idx, cache);
	if (!tuple)
		goto bail;
	return tuple;

bail:
	Py_XDECREF(idx);
	Py_XDECREF(cache);
	Py_XDECREF(tuple);
	return NULL;
}
2171 2171
/*
 * Register the index type on the extension module and build the
 * shared nullid sentinel entry.  Failures are silent here: a failed
 * PyType_Ready leaves the pending exception for import to report,
 * and nullentry simply stays NULL.
 */
void revlog_module_init(PyObject *mod)
{
	indexType.tp_new = PyType_GenericNew;
	if (PyType_Ready(&indexType) < 0)
		return;
	Py_INCREF(&indexType);
	/* PyModule_AddObject steals the reference on success */
	PyModule_AddObject(mod, "index", (PyObject *)&indexType);

	nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0,
				  -1, -1, -1, -1, nullid, 20);
	if (nullentry)
		/* shared immortal tuple: keep it out of GC tracking */
		PyObject_GC_UnTrack(nullentry);
}
@@ -1,179 +1,179
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11 import zlib
12 12
13 13 from ..node import nullid
14 14 from .. import pycompat
15 15 stringio = pycompat.bytesio
16 16
17 17
18 18 _pack = struct.pack
19 19 _unpack = struct.unpack
20 20 _compress = zlib.compress
21 21 _decompress = zlib.decompress
22 22
23 23 # Some code below makes tuples directly because it's more convenient. However,
24 24 # code outside this module should always use dirstatetuple.
def dirstatetuple(*args):
    """Build a dirstate entry from positional fields.

    The star-args pack is already a tuple, so it can be returned
    directly; this mirrors the C extension's dirstatetuple factory.
    """
    return args
28 28
# On-disk RevlogNG index record: a big-endian uint64 packing offset and
# 16-bit type flags, six int32 fields (compressed length, uncompressed
# length, delta base rev, link rev, p1 rev, p2 rev), a 20-byte node id,
# and 12 bytes of padding.
indexformatng = ">Qiiiiii20s12x"
indexfirst = struct.calcsize('Q')
sizeint = struct.calcsize('i')
indexsize = struct.calcsize(indexformatng)
33 33
def gettype(q):
    """Return the type flags stored in the low 16 bits of a packed
    offset/type value (see offset_type)."""
    return int(q & 0xFFFF)
36 36
def offset_type(offset, type):
    """Pack a byte offset and 16-bit type flags into one integer:
    the offset occupies the high bits, the flags the low 16."""
    packed = (int(offset) << 16) | type
    return int(packed)
39 39
class BaseIndexObject(object):
    """Shared behavior for pure-Python revlog index objects.

    Concrete subclasses supply ``_data`` (raw index bytes), ``_lgt``
    (number of records parsed from ``_data``), ``_extra`` (records
    appended via insert), and ``_calculate_index``.
    """
    def __len__(self):
        # The final +1 accounts for the nullid sentinel entry that is
        # always logically present at the end of the index.
        return self._lgt + len(self._extra) + 1

    def insert(self, i, tup):
        # Insertion is only supported immediately before the sentinel.
        assert i == -1
        self._extra.append(tup)

    def _fix_index(self, i):
        # Validate an integer index, normalize negatives, and raise
        # IndexError when out of range.
        if not isinstance(i, int):
            raise TypeError("expecting int indexes")
        if i < 0:
            i = len(self) + i
        if i < 0 or i >= len(self):
            raise IndexError
        return i

    def __getitem__(self, i):
        # index[-1] (or its positive alias len(self) - 1) is always the
        # nullid sentinel; handle it explicitly before normalizing.
        if i == -1 or i == len(self) - 1:
            return (0, 0, 0, -1, -1, -1, -1, nullid)
        i = self._fix_index(i)
        if i >= self._lgt:
            return self._extra[i - self._lgt]
        index = self._calculate_index(i)
        r = struct.unpack(indexformatng, self._data[index:index + indexsize])
        if i == 0:
            # The first record's offset field stores the revlog version
            # header on disk; mask the offset to 0 but keep type flags.
            e = list(r)
            t = gettype(e[0])
            e[0] = offset_type(0, t)
            return tuple(e)
        return r
71 71
class IndexObject(BaseIndexObject):
    """Index over a non-inline revlog: fixed-size records in ``data``."""
    def __init__(self, data):
        assert len(data) % indexsize == 0
        self._data = data
        self._lgt = len(data) // indexsize
        self._extra = []

    def _calculate_index(self, i):
        # Records are fixed-size, so the byte offset is a simple product.
        return i * indexsize

    def __delitem__(self, i):
        # Only truncation of the form ``del index[start:-1]`` is allowed.
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError("deleting slices only supports a:-1 with step 1")
        start = self._fix_index(i.start)
        if start < self._lgt:
            # Truncating into the on-disk region also drops everything
            # that was appended afterwards.
            self._data = self._data[:start * indexsize]
            self._lgt = start
            self._extra = []
        else:
            self._extra = self._extra[:start - self._lgt]
92 92
class InlinedIndexObject(BaseIndexObject):
    """Index over an inline revlog, where each index record is
    immediately followed by the revision data it describes."""
    def __init__(self, data, inline=0):
        self._data = data
        # First scan counts the records; second records their offsets.
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        # Walk the interleaved index/data stream. With lgt=None only
        # count records; otherwise fill self._offsets (lgt byte
        # positions). Returns the number of records seen; raises
        # ValueError if the stream does not end on a record boundary.
        pos = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while pos <= len(self._data) - indexsize:
            s, = struct.unpack('>i',
                self._data[pos + indexfirst:pos + sizeint + indexfirst])
            if lgt is not None:
                self._offsets[count] = pos
            count += 1
            # Skip this record plus the compressed data following it.
            pos += indexsize + s
        if pos != len(self._data):
            raise ValueError("corrupted data")
        return count

    def __delitem__(self, i):
        # Only truncation of the form ``del index[start:-1]`` is allowed.
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError("deleting slices only supports a:-1 with step 1")
        start = self._fix_index(i.start)
        if start < self._lgt:
            self._offsets = self._offsets[:start]
            self._lgt = start
            self._extra = []
        else:
            self._extra = self._extra[:start - self._lgt]

    def _calculate_index(self, i):
        return self._offsets[i]
129 129
def parse_index2(data, inline):
    """Parse raw revlog index bytes; returns (index, cache) where cache
    is (0, data) for inline revlogs and None otherwise."""
    if inline:
        return InlinedIndexObject(data, inline), (0, data)
    return IndexObject(data), None
134 134
def parse_dirstate(dmap, copymap, st):
    """Decode serialized dirstate content ``st`` into ``dmap`` and
    ``copymap``; returns the two parent nodes from the 40-byte header."""
    parents = [st[:20], st[20: 40]]
    # dereference fields so they will be local in loop
    entry_fmt = ">cllll"
    entry_size = struct.calcsize(entry_fmt)
    pos = 40
    end = len(st)

    # the inner loop
    while pos < end:
        header_end = pos + entry_size
        fields = _unpack(">cllll", st[pos:header_end]) # a literal here is faster
        pos = header_end + fields[4]
        fname = st[header_end:pos]
        if '\0' in fname:
            # Copied files store "dest\0source" in the filename field.
            fname, source = fname.split('\0')
            copymap[fname] = source
        dmap[fname] = fields[:4]
    return parents
154 154
def pack_dirstate(dmap, copymap, pl, now):
    """Serialize ``dmap``/``copymap`` back into dirstate file format.

    ``pl`` is the pair of parent nodes and ``now`` the current time,
    used to invalidate mtimes that race with this write.
    """
    now = int(now)
    buf = stringio()
    out = buf.write
    out("".join(pl))
    for fname, entry in dmap.iteritems():
        if entry[0] == 'n' and entry[3] == now:
            # The file was last modified "simultaneously" with this
            # dirstate write (same second, for filesystems with 1s
            # granularity) -- common for a few files on 'update'. The
            # user could still change the file within this second
            # without its size changing, so store an invalid mtime (-1)
            # to force future 'status' calls to compare file contents
            # when the size matches. This prevents mistakenly treating
            # such files as clean.
            entry = dirstatetuple(entry[0], entry[1], entry[2], -1)
            dmap[fname] = entry

        if fname in copymap:
            # Copied files append "\0source" to the stored name.
            fname = "%s\0%s" % (fname, copymap[fname])
        out(_pack(">cllll", entry[0], entry[1], entry[2], entry[3],
                  len(fname)))
        out(fname)
    return buf.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now