##// END OF EJS Templates
Fix recursion depth trouble with ancestor algorithm
mpm@selenic.com -
r45:f2b2d5da default
parent child Browse files
Show More
@@ -1,211 +1,229 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # This provides efficient delta storage with O(1) retrieve and append
4 4 # and O(changes) merge between branches
5 5 #
6 6 # Copyright 2005 Matt Mackall <mpm@selenic.com>
7 7 #
8 8 # This software may be used and distributed according to the terms
9 9 # of the GNU General Public License, incorporated herein by reference.
10 10
11 11 import zlib, struct, sha, os, tempfile, binascii
12 12 from mercurial import mdiff
13 13
14 14 def hex(node): return binascii.hexlify(node)
15 15 def bin(node): return binascii.unhexlify(node)
16 16
17 17 def compress(text):
18 18 return zlib.compress(text)
19 19
20 20 def decompress(bin):
21 21 return zlib.decompress(bin)
22 22
23 23 def hash(text, p1, p2):
24 24 l = [p1, p2]
25 25 l.sort()
26 26 return sha.sha(l[0] + l[1] + text).digest()
27 27
28 28 nullid = "\0" * 20
29 29 indexformat = ">4l20s20s20s"
30 30
31 31 class revlog:
32 32 def __init__(self, opener, indexfile, datafile):
33 33 self.indexfile = indexfile
34 34 self.datafile = datafile
35 35 self.index = []
36 36 self.opener = opener
37 37 self.cache = None
38 38 self.nodemap = {nullid: -1}
39 39 # read the whole index for now, handle on-demand later
40 40 try:
41 41 n = 0
42 42 i = self.opener(self.indexfile).read()
43 43 s = struct.calcsize(indexformat)
44 44 for f in range(0, len(i), s):
45 45 # offset, size, base, linkrev, p1, p2, nodeid
46 46 e = struct.unpack(indexformat, i[f:f + s])
47 47 self.nodemap[e[6]] = n
48 48 self.index.append(e)
49 49 n += 1
50 50 except IOError: pass
51 51
52 52 def tip(self): return self.node(len(self.index) - 1)
53 53 def count(self): return len(self.index)
54 54 def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
55 55 def rev(self, node): return self.nodemap[node]
56 56 def linkrev(self, node): return self.index[self.nodemap[node]][3]
57 57 def parents(self, node):
58 58 if node == nullid: return (nullid, nullid)
59 59 return self.index[self.nodemap[node]][4:6]
60 60
61 61 def start(self, rev): return self.index[rev][0]
62 62 def length(self, rev): return self.index[rev][1]
63 63 def end(self, rev): return self.start(rev) + self.length(rev)
64 64 def base(self, rev): return self.index[rev][2]
65 65
66 66 def lookup(self, id):
67 67 try:
68 68 rev = int(id)
69 69 return self.node(rev)
70 70 except ValueError:
71 71 c = []
72 72 for n in self.nodemap:
73 73 if id in hex(n):
74 74 c.append(n)
75 75 if len(c) > 1: raise KeyError("Ambiguous identifier")
76 76 if len(c) < 1: raise KeyError
77 77 return c[0]
78 78
79 79 return None
80 80
81 81 def revisions(self, list):
82 82 # this can be optimized to do spans, etc
83 83 # be stupid for now
84 84 for node in list:
85 85 yield self.revision(node)
86 86
87 87 def diff(self, a, b):
88 88 return mdiff.textdiff(a, b)
89 89
90 90 def patch(self, text, patch):
91 91 return mdiff.patch(text, patch)
92 92
93 93 def revision(self, node):
94 94 if node == nullid: return ""
95 95 if self.cache and self.cache[0] == node: return self.cache[2]
96 96
97 97 text = None
98 98 rev = self.rev(node)
99 99 base = self.base(rev)
100 100 start = self.start(base)
101 101 end = self.end(rev)
102 102
103 103 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
104 104 base = self.cache[1]
105 105 start = self.start(base + 1)
106 106 text = self.cache[2]
107 107 last = 0
108 108
109 109 f = self.opener(self.datafile)
110 110 f.seek(start)
111 111 data = f.read(end - start)
112 112
113 113 if not text:
114 114 last = self.length(base)
115 115 text = decompress(data[:last])
116 116
117 117 for r in range(base + 1, rev + 1):
118 118 s = self.length(r)
119 119 b = decompress(data[last:last + s])
120 120 text = self.patch(text, b)
121 121 last = last + s
122 122
123 123 (p1, p2) = self.parents(node)
124 124 if node != hash(text, p1, p2):
125 125 raise "integrity check failed on %s:%d" % (self.datafile, rev)
126 126
127 127 self.cache = (node, rev, text)
128 128 return text
129 129
130 130 def addrevision(self, text, transaction, link, p1=None, p2=None):
131 131 if text is None: text = ""
132 132 if p1 is None: p1 = self.tip()
133 133 if p2 is None: p2 = nullid
134 134
135 135 node = hash(text, p1, p2)
136 136
137 137 n = self.count()
138 138 t = n - 1
139 139
140 140 if n:
141 141 start = self.start(self.base(t))
142 142 end = self.end(t)
143 143 prev = self.revision(self.tip())
144 144 data = compress(self.diff(prev, text))
145 145
146 146 # full versions are inserted when the needed deltas
147 147 # become comparable to the uncompressed text
148 148 if not n or (end + len(data) - start) > len(text) * 2:
149 149 data = compress(text)
150 150 base = n
151 151 else:
152 152 base = self.base(t)
153 153
154 154 offset = 0
155 155 if t >= 0:
156 156 offset = self.end(t)
157 157
158 158 e = (offset, len(data), base, link, p1, p2, node)
159 159
160 160 self.index.append(e)
161 161 self.nodemap[node] = n
162 162 entry = struct.pack(indexformat, *e)
163 163
164 164 transaction.add(self.datafile, e[0])
165 165 self.opener(self.datafile, "a").write(data)
166 166 transaction.add(self.indexfile, n * len(entry))
167 167 self.opener(self.indexfile, "a").write(entry)
168 168
169 169 self.cache = (node, n, text)
170 170 return node
171 171
172 172 def ancestor(self, a, b):
173 def expand(e1, e2, a1, a2):
174 ne = []
175 for n in e1:
176 (p1, p2) = self.parents(n)
177 if p1 in a2: return p1
178 if p2 in a2: return p2
179 if p1 != nullid and p1 not in a1:
180 a1[p1] = 1
181 ne.append(p1)
182 if p2 != nullid and p2 not in a1:
183 a1[p2] = 1
184 ne.append(p2)
185 return expand(e2, ne, a2, a1)
186 return expand([a], [b], {a:1}, {b:1})
173 def expand(list, map):
174 a = []
175 while list:
176 n = list.pop(0)
177 map[n] = 1
178 yield n
179 for p in self.parents(n):
180 if p != nullid and p not in map:
181 list.append(p)
182 yield nullid
183
184 amap = {}
185 bmap = {}
186 ag = expand([a], amap)
187 bg = expand([b], bmap)
188 adone = bdone = 0
189
190 while not adone or not bdone:
191 if not adone:
192 an = ag.next()
193 if an == nullid:
194 adone = 1
195 elif an in bmap:
196 return an
197 if not bdone:
198 bn = bg.next()
199 if bn == nullid:
200 bdone = 1
201 elif bn in amap:
202 return bn
203
204 return nullid
187 205
188 206 def mergedag(self, other, transaction, linkseq, accumulate = None):
189 207 """combine the nodes from other's DAG into ours"""
190 208 old = self.tip()
191 209 i = self.count()
192 210 l = []
193 211
194 212 # merge the other revision log into our DAG
195 213 for r in range(other.count()):
196 214 id = other.node(r)
197 215 if id not in self.nodemap:
198 216 (xn, yn) = other.parents(id)
199 217 l.append((id, xn, yn))
200 218 self.nodemap[id] = i
201 219 i += 1
202 220
203 221 	# merge node data for new nodes
204 222 r = other.revisions([e[0] for e in l])
205 223 for e in l:
206 224 t = r.next()
207 225 if accumulate: accumulate(t)
208 226 self.addrevision(t, transaction, linkseq.next(), e[1], e[2])
209 227
210 228 # return the unmerged heads for later resolving
211 229 return (old, self.tip())
General Comments 0
You need to be logged in to leave comments. Login now