@@ -752,8 +752,8 @@ static const char *index_deref(indexObje
 	return PyString_AS_STRING(self->data) + pos * v1_hdrsize;
 }
 
-static inline void index_get_parents(indexObject *self, Py_ssize_t rev,
-				      int *ps)
+static inline int index_get_parents(indexObject *self, Py_ssize_t rev,
+				     int *ps, int maxrev)
 {
 	if (rev >= self->length - 1) {
 		PyObject *tuple = PyList_GET_ITEM(self->added,
@@ -765,6 +765,13 @@ static inline void index_get_parents(ind
 		ps[0] = getbe32(data + 24);
 		ps[1] = getbe32(data + 28);
 	}
+	/* If index file is corrupted, ps[] may point to invalid revisions. So
+	 * there is a risk of buffer overflow to trust them unconditionally. */
+	if (ps[0] > maxrev || ps[1] > maxrev) {
+		PyErr_SetString(PyExc_ValueError, "parent out of range");
+		return -1;
+	}
+	return 0;
 }
 
 
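Note for readers of the new check: the callers patched below index buffers sized to the number of revisions (for example nothead[] in index_headrevs), so a corrupted p1/p2 value larger than maxrev would turn the marking write into an out-of-bounds store. The following standalone sketch uses illustrative names rather than code from this patch and only shows the failure mode the new return value guards against:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a caller-side buffer such as nothead[]:
 * one byte per revision, indexed by a parent revision number. */
static int mark_not_head(char *nothead, long len, const int parents[2])
{
	int i;
	for (i = 0; i < 2; i++) {
		if (parents[i] < 0)        /* -1 means "no parent" */
			continue;
		if (parents[i] > len - 1)  /* corrupted p1/p2: refuse to index */
			return -1;
		nothead[parents[i]] = 1;   /* in range, safe to write */
	}
	return 0;
}

int main(void)
{
	long len = 2;                  /* two revisions, as in the test below */
	char *nothead = calloc(len, 1);
	int good[2] = {0, -1};         /* a valid parent pair */
	int bad[2] = {65536, -1};      /* corrupted p1, far beyond maxrev */

	printf("good: %d\n", mark_not_head(nothead, len, good));  /* 0 */
	printf("bad:  %d\n", mark_not_head(nothead, len, bad));   /* -1 */
	free(nothead);
	return 0;
}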
@@ -1149,7 +1156,8 @@ static PyObject *compute_phases_map_sets
 	if (minrevallphases != -1) {
 		int parents[2];
 		for (i = minrevallphases; i < len; i++) {
-			index_get_parents(self, i, parents);
+			if (index_get_parents(self, i, parents, len - 1) < 0)
+				goto release_phasesetlist;
 			set_phase_from_parents(phases, parents[0], parents[1], i);
 		}
 	}
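The new error exits rely on each function's existing cleanup label (release_phasesetlist here, bail in the later hunks), so whatever was allocated before the failure is still released. A minimal sketch of that goto-cleanup shape, with illustrative names only, not the actual code in parsers.c:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: one cleanup label frees partial state on every exit
 * path, so an early failure inside the loop cannot leak the allocation. */
static int compute_something(long len)
{
	int ret = -1;                        /* pessimistic default */
	long i;
	char *phases = calloc(len, 1);
	if (phases == NULL)
		goto release;

	for (i = 0; i < len; i++) {
		int parents[2] = {(int)i - 1, -1};
		/* an out-of-range parent aborts the whole computation */
		if (parents[0] > len - 1 || parents[1] > len - 1)
			goto release;
		phases[i] = 1;               /* ... real work would go here ... */
	}
	ret = 0;                             /* success */
release:
	free(phases);                        /* free(NULL) is a no-op */
	return ret;
}

int main(void)
{
	printf("%d\n", compute_something(4));  /* prints 0 */
	return 0;
}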
@@ -1248,7 +1256,8 @@ static PyObject *index_headrevs(indexObj
 			continue;
 		}
 
-		index_get_parents(self, i, parents);
+		if (index_get_parents(self, i, parents, len - 1) < 0)
+			goto bail;
 		for (j = 0; j < 2; j++) {
 			if (parents[j] >= 0)
 				nothead[parents[j]] = 1;
@@ -1716,7 +1725,8 @@ static PyObject *find_gca_candidates(ind
 				}
 			}
 		}
-		index_get_parents(self, v, parents);
+		if (index_get_parents(self, v, parents, maxrev) < 0)
+			goto bail;
 
 		for (i = 0; i < 2; i++) {
 			int p = parents[i];
@@ -1813,7 +1823,8 @@ static PyObject *find_deepest(indexObjec
 			continue;
 
 		sv = seen[v];
-		index_get_parents(self, v, parents);
+		if (index_get_parents(self, v, parents, maxrev) < 0)
+			goto bail;
 
 		for (i = 0; i < 2; i++) {
 			int p = parents[i];
@@ -59,3 +59,62 @@ We approximate that by reducing the read
   26333235a41c
 
   $ cd ..
+
+Test corrupted p1/p2 fields that could cause SEGV at parsers.c:
+
+  $ mkdir invalidparent
+  $ cd invalidparent
+
+  $ hg clone --pull -q --config phases.publish=False ../a limit
+  $ hg clone --pull -q --config phases.publish=False ../a segv
+  $ rm -R limit/.hg/cache segv/.hg/cache
+
+  $ python <<EOF
+  > data = open("limit/.hg/store/00changelog.i", "rb").read()
+  > for n, p in [('limit', '\0\0\0\x02'), ('segv', '\0\x01\0\0')]:
+  >     # corrupt p1 at rev0 and p2 at rev1
+  >     d = data[:24] + p + data[28:127 + 28] + p + data[127 + 32:]
+  >     open(n + "/.hg/store/00changelog.i", "wb").write(d)
+  > EOF
+
+  $ hg debugindex -f1 limit/.hg/store/00changelog.i
+     rev flag   offset   length     size   base   link     p1     p2       nodeid
+       0 0000        0       63       62      0      0      2     -1 7c31755bf9b5
+       1 0000       63       66       65      1      1      0      2 26333235a41c
+  $ hg debugindex -f1 segv/.hg/store/00changelog.i
+     rev flag   offset   length     size   base   link     p1     p2       nodeid
+       0 0000        0       63       62      0      0  65536     -1 7c31755bf9b5
+       1 0000       63       66       65      1      1      0  65536 26333235a41c
+
+  $ cat <<EOF > test.py
+  > import sys
+  > from mercurial import changelog, scmutil
+  > cl = changelog.changelog(scmutil.vfs(sys.argv[1]))
+  > n0, n1 = cl.node(0), cl.node(1)
+  > ops = [
+  >     ('compute_phases_map_sets', lambda: cl.computephases([[0], []])),
+  >     ('index_headrevs', lambda: cl.headrevs()),
+  >     ('find_gca_candidates', lambda: cl.commonancestorsheads(n0, n1)),
+  >     ('find_deepest', lambda: cl.ancestor(n0, n1)),
+  > ]
+  > for l, f in ops:
+  >     print l + ':',
+  >     try:
+  >         f()
+  >         print 'uncaught buffer overflow?'
+  >     except ValueError, inst:
+  >         print inst
+  > EOF
+
+  $ python test.py limit/.hg/store
+  compute_phases_map_sets: parent out of range
+  index_headrevs: parent out of range
+  find_gca_candidates: parent out of range
+  find_deepest: parent out of range
+  $ python test.py segv/.hg/store
+  compute_phases_map_sets: parent out of range
+  index_headrevs: parent out of range
+  find_gca_candidates: parent out of range
+  find_deepest: parent out of range
+
+  $ cd ..
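For anyone checking the slicing arithmetic in the corruption step above: it assumes the fixed 64-byte version-1 index entry (v1_hdrsize in parsers.c) with big-endian p1/p2 fields at offsets 24 and 28, the same offsets getbe32() reads in the first hunk, and the '\0\x01\0\0' bytes decode to 65536 big-endian, matching the segv clone's debugindex output. A small standalone sketch of that arithmetic, with the entry layout stated as an assumption:

#include <stdio.h>

/* Assumed v1 revlog index layout; only the 24/28 offsets and the
 * pos * v1_hdrsize stride are visible in the patch itself. */
enum {
	V1_HDRSIZE = 64,  /* fixed size of one index entry */
	OFF_P1     = 24,  /* big-endian 32-bit parent 1 revision */
	OFF_P2     = 28   /* big-endian 32-bit parent 2 revision */
};

int main(void)
{
	/* rev 0's 63 bytes of inline data (see the debugindex "length" column)
	 * sit right after its entry, so rev 1's entry starts at 64 + 63 = 127. */
	int rev0_datalen = 63;
	int rev1_entry = V1_HDRSIZE + rev0_datalen;

	printf("rev0 p1 bytes: %d..%d\n", OFF_P1, OFF_P1 + 3);       /* 24..27 */
	printf("rev1 p2 bytes: %d..%d\n", rev1_entry + OFF_P2,
	       rev1_entry + OFF_P2 + 3);                             /* 155..158 */
	return 0;
}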