Add iterator to the lazymap code
mpm@selenic.com
r97:7a2abee6 default
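This revision gives `lazymap` an `__iter__` method, so callers can walk every node ID while index blocks are still parsed on demand (1000 entries at a time via `lazyparser.load`). `revlog.lookup` in the diff below relies on exactly this when it scans `self.nodemap` for a hex-prefix match. A minimal usage sketch, where `rl` is an assumed revlog instance (not part of this change):

# Hypothetical usage sketch; rl is an assumed revlog instance.
# Iterating nodemap now calls lazymap.__iter__, which yields node
# IDs from the lazily parsed index instead of a fully built dict.
matches = []
for n in rl.nodemap:            # lazymap.__iter__
    if "7a2abee6" in hex(n):    # hex() as defined in revlog.py
        matches.append(n)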
@@ -1,450 +1,457 @@
 # revlog.py - storage back-end for mercurial
 #
 # This provides efficient delta storage with O(1) retrieve and append
 # and O(changes) merge between branches
 #
 # Copyright 2005 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.

 import zlib, struct, sha, os, tempfile, binascii
 from mercurial import mdiff

 def hex(node): return binascii.hexlify(node)
 def bin(node): return binascii.unhexlify(node)
 def short(node): return hex(node[:4])

 def compress(text):
     return zlib.compress(text)

 def decompress(bin):
     return zlib.decompress(bin)

 def hash(text, p1, p2):
     l = [p1, p2]
     l.sort()
     return sha.sha(l[0] + l[1] + text).digest()

 nullid = "\0" * 20
 indexformat = ">4l20s20s20s"

 class lazyparser:
     def __init__(self, data):
         self.data = data
         self.s = struct.calcsize(indexformat)
         self.l = len(data)/self.s
         self.index = [None] * self.l
         self.map = {nullid: -1}

         if 0:
             n = 0
             i = self.data
             s = struct.calcsize(indexformat)
             for f in xrange(0, len(i), s):
                 # offset, size, base, linkrev, p1, p2, nodeid
                 e = struct.unpack(indexformat, i[f:f + s])
                 self.map[e[6]] = n
                 self.index.append(e)
                 n += 1

     def load(self, pos):
         block = pos / 1000
         i = block * 1000
         end = min(self.l, i + 1000)
         while i < end:
             d = self.data[i * self.s: (i + 1) * self.s]
             e = struct.unpack(indexformat, d)
             self.index[i] = e
             self.map[e[6]] = i
             i += 1

 class lazyindex:
     def __init__(self, parser):
         self.p = parser
     def __len__(self):
         return len(self.p.index)
     def __getitem__(self, pos):
         i = self.p.index[pos]
         if not i:
             self.p.load(pos)
             return self.p.index[pos]
         return i
     def append(self, e):
         self.p.index.append(e)

 class lazymap:
     def __init__(self, parser):
         self.p = parser
     def load(self, key):
         n = self.p.data.find(key)
         if n < 0: raise KeyError("node " + hex(key))
         pos = n / self.p.s
         self.p.load(pos)
     def __contains__(self, key):
         try:
             self[key]
             return True
         except KeyError:
             return False
+    def __iter__(self):
+        for i in xrange(self.p.l):
+            try:
+                yield self.p.index[i][6]
+            except:
+                self.p.load(i)
+                yield self.p.index[i][6]
     def __getitem__(self, key):
         try:
             return self.p.map[key]
         except KeyError:
             try:
                 self.load(key)
                 return self.p.map[key]
             except KeyError:
                 raise KeyError("node " + hex(key))
     def __setitem__(self, key, val):
         self.p.map[key] = val

 class revlog:
     def __init__(self, opener, indexfile, datafile):
         self.indexfile = indexfile
         self.datafile = datafile
         self.opener = opener
         self.cache = None
         # read the whole index for now, handle on-demand later
         try:
             i = self.opener(self.indexfile).read()
         except IOError:
             i = ""
         parser = lazyparser(i)
         self.index = lazyindex(parser)
         self.nodemap = lazymap(parser)

     def tip(self): return self.node(len(self.index) - 1)
     def count(self): return len(self.index)
     def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
     def rev(self, node): return self.nodemap[node]
     def linkrev(self, node): return self.index[self.nodemap[node]][3]
     def parents(self, node):
         if node == nullid: return (nullid, nullid)
         return self.index[self.nodemap[node]][4:6]

     def start(self, rev): return self.index[rev][0]
     def length(self, rev): return self.index[rev][1]
     def end(self, rev): return self.start(rev) + self.length(rev)
     def base(self, rev): return self.index[rev][2]

     def lookup(self, id):
         try:
             rev = int(id)
             return self.node(rev)
         except ValueError:
             c = []
             for n in self.nodemap:
                 if id in hex(n):
                     c.append(n)
             if len(c) > 1: raise KeyError("Ambiguous identifier")
             if len(c) < 1: raise KeyError("No match found")
             return c[0]

         return None

     def diff(self, a, b):
         return mdiff.textdiff(a, b)

     def patches(self, t, pl):
         return mdiff.patches(t, pl)

     def revision(self, node):
         if node == nullid: return ""
         if self.cache and self.cache[0] == node: return self.cache[2]

         text = None
         rev = self.rev(node)
         base = self.base(rev)
         start = self.start(base)
         end = self.end(rev)

         if self.cache and self.cache[1] >= base and self.cache[1] < rev:
             base = self.cache[1]
             start = self.start(base + 1)
             text = self.cache[2]
             last = 0

         f = self.opener(self.datafile)
         f.seek(start)
         data = f.read(end - start)

         if not text:
             last = self.length(base)
             text = decompress(data[:last])

         bins = []
         for r in xrange(base + 1, rev + 1):
             s = self.length(r)
             bins.append(decompress(data[last:last + s]))
             last = last + s

         text = mdiff.patches(text, bins)

         (p1, p2) = self.parents(node)
         if node != hash(text, p1, p2):
             raise "integrity check failed on %s:%d" % (self.datafile, rev)

         self.cache = (node, rev, text)
         return text

     def addrevision(self, text, transaction, link, p1=None, p2=None):
         if text is None: text = ""
         if p1 is None: p1 = self.tip()
         if p2 is None: p2 = nullid

         node = hash(text, p1, p2)

         n = self.count()
         t = n - 1

         if n:
             base = self.base(t)
             start = self.start(base)
             end = self.end(t)
             prev = self.revision(self.tip())
             data = compress(self.diff(prev, text))
             dist = end - start + len(data)

         # full versions are inserted when the needed deltas
         # become comparable to the uncompressed text
         if not n or dist > len(text) * 2:
             data = compress(text)
             base = n
         else:
             base = self.base(t)

         offset = 0
         if t >= 0:
             offset = self.end(t)

         e = (offset, len(data), base, link, p1, p2, node)

         self.index.append(e)
         self.nodemap[node] = n
         entry = struct.pack(indexformat, *e)

         transaction.add(self.datafile, e[0])
         self.opener(self.datafile, "a").write(data)
         transaction.add(self.indexfile, n * len(entry))
         self.opener(self.indexfile, "a").write(entry)

         self.cache = (node, n, text)
         return node

     def ancestor(self, a, b):
         def expand(list, map):
             a = []
             while list:
                 n = list.pop(0)
                 map[n] = 1
                 yield n
                 for p in self.parents(n):
                     if p != nullid and p not in map:
                         list.append(p)
             yield nullid

         amap = {}
         bmap = {}
         ag = expand([a], amap)
         bg = expand([b], bmap)
         adone = bdone = 0

         while not adone or not bdone:
             if not adone:
                 an = ag.next()
                 if an == nullid:
                     adone = 1
                 elif an in bmap:
                     return an
             if not bdone:
                 bn = bg.next()
                 if bn == nullid:
                     bdone = 1
                 elif bn in amap:
                     return bn

         return nullid

     def group(self, linkmap):
         # given a list of changeset revs, return a set of deltas and
         # metadata corresponding to nodes. the first delta is
         # parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
         # have this parent as it has all history before these
         # changesets. parent is parent[0]

         revs = []
         needed = {}

         # find file nodes/revs that match changeset revs
         for i in xrange(0, self.count()):
             if self.index[i][3] in linkmap:
                 revs.append(i)
                 needed[i] = 1

         # if we don't have any revisions touched by these changesets, bail
         if not revs: return struct.pack(">l", 0)

         # add the parent of the first rev
         p = self.parents(self.node(revs[0]))[0]
         revs.insert(0, self.rev(p))

         # for each delta that isn't contiguous in the log, we need to
         # reconstruct the base, reconstruct the result, and then
         # calculate the delta. We also need to do this where we've
         # stored a full version and not a delta
         for i in xrange(0, len(revs) - 1):
             a, b = revs[i], revs[i + 1]
             if a + 1 != b or self.base(b) == b:
                 for j in xrange(self.base(a), a + 1):
                     needed[j] = 1
                 for j in xrange(self.base(b), b + 1):
                     needed[j] = 1

         # calculate spans to retrieve from datafile
         needed = needed.keys()
         needed.sort()
         spans = []
         for n in needed:
             if n < 0: continue
             o = self.start(n)
             l = self.length(n)
             spans.append((o, l, [(n, l)]))

         # merge spans
         merge = [spans.pop(0)]
         while spans:
             e = spans.pop(0)
             f = merge[-1]
             if e[0] == f[0] + f[1]:
                 merge[-1] = (f[0], f[1] + e[1], f[2] + e[2])
             else:
                 merge.append(e)

         # read spans in, divide up chunks
         chunks = {}
         for span in merge:
             # we reopen the file for each span to make http happy for now
             f = self.opener(self.datafile)
             f.seek(span[0])
             data = f.read(span[1])

             # divide up the span
             pos = 0
             for r, l in span[2]:
                 chunks[r] = data[pos: pos + l]
                 pos += l

         # helper to reconstruct intermediate versions
         def construct(text, base, rev):
             bins = [decompress(chunks[r]) for r in xrange(base + 1, rev + 1)]
             return mdiff.patches(text, bins)

         # build deltas
         deltas = []
         for d in xrange(0, len(revs) - 1):
             a, b = revs[d], revs[d + 1]
             n = self.node(b)

             if a + 1 != b or self.base(b) == b:
                 if a >= 0:
                     base = self.base(a)
                     ta = decompress(chunks[self.base(a)])
                     ta = construct(ta, base, a)
                 else:
                     ta = ""

                 base = self.base(b)
                 if a > base:
                     base = a
                     tb = ta
                 else:
                     tb = decompress(chunks[self.base(b)])
                 tb = construct(tb, base, b)
                 d = self.diff(ta, tb)
             else:
                 d = decompress(chunks[b])

             p = self.parents(n)
             meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
             l = struct.pack(">l", len(meta) + len(d) + 4)
             deltas.append(l + meta + d)

         l = struct.pack(">l", sum(map(len, deltas)) + 4)
         deltas.insert(0, l)
         return "".join(deltas)

     def addgroup(self, data, linkmapper, transaction):
         # given a set of deltas, add them to the revision log. the
         # first delta is against its parent, which should be in our
         # log, the rest are against the previous delta.

         if not data: return self.tip()

         # retrieve the parent revision of the delta chain
         chain = data[24:44]
         if not chain in self.nodemap:
             raise "unknown base %s" % short(chain[:4])

         # track the base of the current delta log
         r = self.count()
         t = r - 1

         base = prev = -1
         start = end = 0
         if r:
             start = self.start(self.base(t))
             end = self.end(t)
             measure = self.length(self.base(t))
             base = self.base(t)
             prev = self.tip()

         transaction.add(self.datafile, end)
         transaction.add(self.indexfile, r * struct.calcsize(indexformat))
         dfh = self.opener(self.datafile, "a")
         ifh = self.opener(self.indexfile, "a")

         # loop through our set of deltas
         pos = 0
         while pos < len(data):
             l, node, p1, p2, cs = struct.unpack(">l20s20s20s20s",
                                                 data[pos:pos+84])
             link = linkmapper(cs)
             if node in self.nodemap:
                 raise "already have %s" % hex(node[:4])
             delta = data[pos + 84:pos + l]
             pos += l

             # full versions are inserted when the needed deltas become
             # comparable to the uncompressed text or when the previous
             # version is not the one we have a delta against. We use
             # the size of the previous full rev as a proxy for the
             # current size.

             if chain == prev:
                 cdelta = compress(delta)

             if chain != prev or (end - start + len(cdelta)) > measure * 2:
                 # flush our writes here so we can read it in revision
                 dfh.flush()
                 ifh.flush()
                 text = self.revision(chain)
                 text = self.patches(text, [delta])
                 chk = self.addrevision(text, transaction, link, p1, p2)
                 if chk != node:
                     raise "consistency error adding group"
                 measure = len(text)
             else:
                 e = (end, len(cdelta), self.base(t), link, p1, p2, node)
                 self.index.append(e)
                 self.nodemap[node] = r
                 dfh.write(cdelta)
                 ifh.write(struct.pack(indexformat, *e))

             t, r, chain, prev = r, r + 1, node, node
             start = self.start(self.base(t))
             end = self.end(t)

         dfh.close()
         ifh.close()
         return node
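As the comments in `addrevision` and `addgroup` note, a full compressed version is written whenever replaying the accumulated delta chain would cost more than about twice the uncompressed text. A simplified standalone sketch of that decision; the function name and parameters are illustrative, not from the file:

def should_store_snapshot(chain_bytes, delta_bytes, text_bytes):
    # Mirrors the addrevision check: dist = end - start + len(data),
    # and a snapshot is stored when dist > len(text) * 2.
    dist = chain_bytes + delta_bytes
    return dist > text_bytes * 2

# Example: a 900-byte chain plus a 200-byte delta against 500 bytes
# of text exceeds the 2x budget, so a full version would be stored:
# should_store_snapshot(900, 200, 500) -> True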
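For reference, `group` frames each delta as a 4-byte big-endian length (which counts the length word itself), 80 bytes of metadata (node, p1, p2, and the linked changeset node), then the delta body, and `addgroup` unpacks exactly that layout. A hedged reader sketch for such a stream, assuming the leading total-length word has already been stripped, as it is by the time `addgroup` sees the data:

import struct

def iterchunks(data):
    # Walk the length-prefixed chunks the way addgroup does:
    # >l length, four 20-byte nodes (node, p1, p2, cs), then the
    # delta payload; the length includes its own 4 bytes.
    pos = 0
    while pos < len(data):
        l, node, p1, p2, cs = struct.unpack(">l20s20s20s20s",
                                            data[pos:pos + 84])
        yield node, p1, p2, cs, data[pos + 84:pos + l]
        pos += l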