Make lazyindex load slightly faster
mpm@selenic.com
r115:39b438ee default
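The change moves lazyindex's slow path into a separate load() helper, so __getitem__ becomes a single expression: return the cached entry if it has already been parsed, otherwise fall through to load(). The `or` is safe here because a parsed entry is a non-empty tuple (always truthy), while an unparsed slot is None. A minimal standalone sketch of that lazy-loading contract (fakeparser and its fields are hypothetical stand-ins for this note, not Mercurial code):

    # Sketch of the contract lazyindex relies on: slots start as None,
    # the first lookup parses on demand, later lookups hit the cache.
    class fakeparser:
        def __init__(self, n):
            self.index = [None] * n
        def load(self, pos):
            # pretend to unpack one index entry from disk
            self.index[pos] = (pos, 0, 0, 0, 'p1', 'p2', 'node%d' % pos)

    class lazyindex:
        def __init__(self, parser):
            self.p = parser
        def load(self, pos):
            self.p.load(pos)
            return self.p.index[pos]
        def __getitem__(self, pos):
            # fast path: parsed entries are non-empty tuples, hence
            # truthy; only None slots fall through to load()
            return self.p.index[pos] or self.load(pos)

    ix = lazyindex(fakeparser(3))
    print(ix[1])  # parses on first access
    print(ix[1])  # served from the cached tuple

Compared with the old body (fetch, test for None, reload, return), the common already-loaded case now costs one indexing operation and one truth test.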
@@ -1,474 +1,473 @@
 # revlog.py - storage back-end for mercurial
 #
 # This provides efficient delta storage with O(1) retrieve and append
 # and O(changes) merge between branches
 #
 # Copyright 2005 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 
 import zlib, struct, sha, os, tempfile, binascii
 from mercurial import mdiff
 
 def hex(node): return binascii.hexlify(node)
 def bin(node): return binascii.unhexlify(node)
 def short(node): return hex(node[:4])
 
 def compress(text):
     if not text: return text
     if len(text) < 44:
         if text[0] == '\0': return text
         return 'u' + text
     bin = zlib.compress(text)
     if len(bin) > len(text):
         if text[0] == '\0': return text
         return 'u' + text
     return bin
 
 def decompress(bin):
     if not bin: return bin
     t = bin[0]
     if t == '\0': return bin
     if t == 'x': return zlib.decompress(bin)
     if t == 'u': return bin[1:]
     raise "unknown compression type %s" % t
 
 def hash(text, p1, p2):
     l = [p1, p2]
     l.sort()
     return sha.sha(l[0] + l[1] + text).digest()
 
 nullid = "\0" * 20
 indexformat = ">4l20s20s20s"
 
 class lazyparser:
     def __init__(self, data):
         self.data = data
         self.s = struct.calcsize(indexformat)
         self.l = len(data)/self.s
         self.index = [None] * self.l
         self.map = {nullid: -1}
 
         if 0:
             n = 0
             i = self.data
             s = struct.calcsize(indexformat)
             for f in xrange(0, len(i), s):
                 # offset, size, base, linkrev, p1, p2, nodeid
                 e = struct.unpack(indexformat, i[f:f + s])
                 self.map[e[6]] = n
                 self.index.append(e)
                 n += 1
 
     def load(self, pos):
         block = pos / 1000
         i = block * 1000
         end = min(self.l, i + 1000)
         while i < end:
             d = self.data[i * self.s: (i + 1) * self.s]
             e = struct.unpack(indexformat, d)
             self.index[i] = e
             self.map[e[6]] = i
             i += 1
 
 class lazyindex:
     def __init__(self, parser):
         self.p = parser
     def __len__(self):
         return len(self.p.index)
-    def __getitem__(self, pos):
-        i = self.p.index[pos]
-        if not i:
-            self.p.load(pos)
-            return self.p.index[pos]
-        return i
+    def load(self, pos):
+        self.p.load(pos)
+        return self.p.index[pos]
+    def __getitem__(self, pos):
+        return self.p.index[pos] or self.load(pos)
     def append(self, e):
         self.p.index.append(e)
 
 class lazymap:
     def __init__(self, parser):
         self.p = parser
     def load(self, key):
         n = self.p.data.find(key)
         if n < 0: raise KeyError("node " + hex(key))
         pos = n / self.p.s
         self.p.load(pos)
     def __contains__(self, key):
         try:
             self[key]
             return True
         except KeyError:
             return False
     def __iter__(self):
         for i in xrange(self.p.l):
             try:
                 yield self.p.index[i][6]
             except:
                 self.p.load(i)
                 yield self.p.index[i][6]
     def __getitem__(self, key):
         try:
             return self.p.map[key]
         except KeyError:
             try:
                 self.load(key)
                 return self.p.map[key]
             except KeyError:
                 raise KeyError("node " + hex(key))
     def __setitem__(self, key, val):
         self.p.map[key] = val
 
 class revlog:
     def __init__(self, opener, indexfile, datafile):
         self.indexfile = indexfile
         self.datafile = datafile
         self.opener = opener
         self.cache = None
         # read the whole index for now, handle on-demand later
         try:
             i = self.opener(self.indexfile).read()
         except IOError:
             i = ""
         parser = lazyparser(i)
         self.index = lazyindex(parser)
         self.nodemap = lazymap(parser)
 
     def tip(self): return self.node(len(self.index) - 1)
     def count(self): return len(self.index)
     def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
     def rev(self, node): return self.nodemap[node]
     def linkrev(self, node): return self.index[self.nodemap[node]][3]
     def parents(self, node):
         if node == nullid: return (nullid, nullid)
         return self.index[self.nodemap[node]][4:6]
 
     def start(self, rev): return self.index[rev][0]
     def length(self, rev): return self.index[rev][1]
     def end(self, rev): return self.start(rev) + self.length(rev)
     def base(self, rev): return self.index[rev][2]
 
     def lookup(self, id):
         try:
             rev = int(id)
             return self.node(rev)
         except ValueError:
             c = []
             for n in self.nodemap:
                 if id in hex(n):
                     c.append(n)
             if len(c) > 1: raise KeyError("Ambiguous identifier")
             if len(c) < 1: raise KeyError("No match found")
             return c[0]
 
         return None
 
     def diff(self, a, b):
         return mdiff.textdiff(a, b)
 
     def patches(self, t, pl):
         return mdiff.patches(t, pl)
 
     def revision(self, node):
         if node == nullid: return ""
         if self.cache and self.cache[0] == node: return self.cache[2]
 
         text = None
         rev = self.rev(node)
         base = self.base(rev)
         start = self.start(base)
         end = self.end(rev)
 
         if self.cache and self.cache[1] >= base and self.cache[1] < rev:
             base = self.cache[1]
             start = self.start(base + 1)
             text = self.cache[2]
             last = 0
 
         f = self.opener(self.datafile)
         f.seek(start)
         data = f.read(end - start)
 
         if not text:
             last = self.length(base)
             text = decompress(data[:last])
 
         bins = []
         for r in xrange(base + 1, rev + 1):
             s = self.length(r)
             bins.append(decompress(data[last:last + s]))
             last = last + s
 
         text = mdiff.patches(text, bins)
 
         (p1, p2) = self.parents(node)
         if node != hash(text, p1, p2):
             raise IOError("integrity check failed on %s:%d"
                           % (self.datafile, rev))
 
         self.cache = (node, rev, text)
         return text
 
     def addrevision(self, text, transaction, link, p1=None, p2=None):
         if text is None: text = ""
         if p1 is None: p1 = self.tip()
         if p2 is None: p2 = nullid
 
         node = hash(text, p1, p2)
 
         n = self.count()
         t = n - 1
 
         if n:
             base = self.base(t)
             start = self.start(base)
             end = self.end(t)
             prev = self.revision(self.tip())
             d = self.diff(prev, text)
             if self.patches(prev, [d]) != text:
                 raise AssertionError("diff failed")
             data = compress(d)
             dist = end - start + len(data)
 
         # full versions are inserted when the needed deltas
         # become comparable to the uncompressed text
         if not n or dist > len(text) * 2:
             data = compress(text)
             base = n
         else:
             base = self.base(t)
 
         offset = 0
         if t >= 0:
             offset = self.end(t)
 
         e = (offset, len(data), base, link, p1, p2, node)
 
         self.index.append(e)
         self.nodemap[node] = n
         entry = struct.pack(indexformat, *e)
 
         transaction.add(self.datafile, e[0])
         self.opener(self.datafile, "a").write(data)
         transaction.add(self.indexfile, n * len(entry))
         self.opener(self.indexfile, "a").write(entry)
 
         self.cache = (node, n, text)
         return node
 
     def ancestor(self, a, b):
         def expand(list, map):
             a = []
             while list:
                 n = list.pop(0)
                 map[n] = 1
                 yield n
                 for p in self.parents(n):
                     if p != nullid and p not in map:
                         list.append(p)
             yield nullid
 
         amap = {}
         bmap = {}
         ag = expand([a], amap)
         bg = expand([b], bmap)
         adone = bdone = 0
 
         while not adone or not bdone:
             if not adone:
                 an = ag.next()
                 if an == nullid:
                     adone = 1
                 elif an in bmap:
                     return an
             if not bdone:
                 bn = bg.next()
                 if bn == nullid:
                     bdone = 1
                 elif bn in amap:
                     return bn
 
         return nullid
 
     def group(self, linkmap):
         # given a list of changeset revs, return a set of deltas and
         # metadata corresponding to nodes. the first delta is
         # parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
         # have this parent as it has all history before these
         # changesets. parent is parent[0]
 
         revs = []
         needed = {}
 
         # find file nodes/revs that match changeset revs
         for i in xrange(0, self.count()):
             if self.index[i][3] in linkmap:
                 revs.append(i)
                 needed[i] = 1
 
         # if we don't have any revisions touched by these changesets, bail
         if not revs: return struct.pack(">l", 0)
 
         # add the parent of the first rev
         p = self.parents(self.node(revs[0]))[0]
         revs.insert(0, self.rev(p))
 
         # for each delta that isn't contiguous in the log, we need to
         # reconstruct the base, reconstruct the result, and then
         # calculate the delta. We also need to do this where we've
         # stored a full version and not a delta
         for i in xrange(0, len(revs) - 1):
             a, b = revs[i], revs[i + 1]
             if a + 1 != b or self.base(b) == b:
                 for j in xrange(self.base(a), a + 1):
                     needed[j] = 1
                 for j in xrange(self.base(b), b + 1):
                     needed[j] = 1
 
         # calculate spans to retrieve from datafile
         needed = needed.keys()
         needed.sort()
         spans = []
         for n in needed:
             if n < 0: continue
             o = self.start(n)
             l = self.length(n)
             spans.append((o, l, [(n, l)]))
 
         # merge spans
         merge = [spans.pop(0)]
         while spans:
             e = spans.pop(0)
             f = merge[-1]
             if e[0] == f[0] + f[1]:
                 merge[-1] = (f[0], f[1] + e[1], f[2] + e[2])
             else:
                 merge.append(e)
 
         # read spans in, divide up chunks
         chunks = {}
         for span in merge:
             # we reopen the file for each span to make http happy for now
             f = self.opener(self.datafile)
             f.seek(span[0])
             data = f.read(span[1])
 
             # divide up the span
             pos = 0
             for r, l in span[2]:
                 chunks[r] = data[pos: pos + l]
                 pos += l
 
         # helper to reconstruct intermediate versions
         def construct(text, base, rev):
             bins = [decompress(chunks[r]) for r in xrange(base + 1, rev + 1)]
             return mdiff.patches(text, bins)
 
         # build deltas
         deltas = []
         for d in xrange(0, len(revs) - 1):
             a, b = revs[d], revs[d + 1]
             n = self.node(b)
 
             if a + 1 != b or self.base(b) == b:
                 if a >= 0:
                     base = self.base(a)
                     ta = decompress(chunks[self.base(a)])
                     ta = construct(ta, base, a)
                 else:
                     ta = ""
 
                 base = self.base(b)
                 if a > base:
                     base = a
                     tb = ta
                 else:
                     tb = decompress(chunks[self.base(b)])
                     tb = construct(tb, base, b)
                 d = self.diff(ta, tb)
             else:
                 d = decompress(chunks[b])
 
             p = self.parents(n)
             meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
             l = struct.pack(">l", len(meta) + len(d) + 4)
             deltas.append(l + meta + d)
 
         l = struct.pack(">l", sum(map(len, deltas)) + 4)
         deltas.insert(0, l)
         return "".join(deltas)
 
     def addgroup(self, data, linkmapper, transaction):
         # given a set of deltas, add them to the revision log. the
         # first delta is against its parent, which should be in our
         # log, the rest are against the previous delta.
 
         if not data: return self.tip()
 
         # retrieve the parent revision of the delta chain
         chain = data[24:44]
         if not chain in self.nodemap:
             raise "unknown base %s" % short(chain[:4])
 
         # track the base of the current delta log
         r = self.count()
         t = r - 1
 
         base = prev = -1
         start = end = 0
         if r:
             start = self.start(self.base(t))
             end = self.end(t)
             measure = self.length(self.base(t))
             base = self.base(t)
             prev = self.tip()
 
         transaction.add(self.datafile, end)
         transaction.add(self.indexfile, r * struct.calcsize(indexformat))
         dfh = self.opener(self.datafile, "a")
         ifh = self.opener(self.indexfile, "a")
 
         # loop through our set of deltas
         pos = 0
         while pos < len(data):
             l, node, p1, p2, cs = struct.unpack(">l20s20s20s20s",
                                                 data[pos:pos+84])
             link = linkmapper(cs)
             if node in self.nodemap:
                 raise "already have %s" % hex(node[:4])
             delta = data[pos + 84:pos + l]
             pos += l
 
             # full versions are inserted when the needed deltas become
             # comparable to the uncompressed text or when the previous
             # version is not the one we have a delta against. We use
             # the size of the previous full rev as a proxy for the
             # current size.
 
             if chain == prev:
                 cdelta = compress(delta)
 
             if chain != prev or (end - start + len(cdelta)) > measure * 2:
                 # flush our writes here so we can read it in revision
                 dfh.flush()
                 ifh.flush()
                 text = self.revision(chain)
                 text = self.patches(text, [delta])
                 chk = self.addrevision(text, transaction, link, p1, p2)
                 if chk != node:
                     raise "consistency error adding group"
                 measure = len(text)
             else:
                 e = (end, len(cdelta), self.base(t), link, p1, p2, node)
                 self.index.append(e)
                 self.nodemap[node] = r
                 dfh.write(cdelta)
                 ifh.write(struct.pack(indexformat, *e))
 
             t, r, chain, prev = r, r + 1, node, node
             start = self.start(self.base(t))
             end = self.end(t)
 
         dfh.close()
         ifh.close()
         return node
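For context on what the index lookups above feed into: revision() stores one full snapshot at base(rev) and a delta per later revision, so reading rev N means patching the snapshot with the chain of deltas base+1 through N. A toy sketch of that walk; the (start, end, replacement) delta format is invented for this note, while the real code applies zlib-compressed binary deltas with mdiff.patches:

    # Toy version of revision()'s delta-chain reconstruction.
    def apply_delta(text, delta):
        # apply edits back-to-front so earlier offsets stay valid
        for start, end, repl in sorted(delta, reverse=True):
            text = text[:start] + repl + text[end:]
        return text

    def reconstruct(base_text, chain):
        # same shape as revision(): start from the full text at base,
        # then patch each delta from base + 1 up to rev in order
        text = base_text
        for delta in chain:
            text = apply_delta(text, delta)
        return text

    v0 = "the quick brown fox"
    d1 = [(4, 9, "slow")]   # rev 1: "the slow brown fox"
    d2 = [(9, 14, "red")]   # rev 2: "the slow red fox"
    print(reconstruct(v0, [d1, d2]))

The module itself is early Python 2 Mercurial (xrange, string exceptions, integer division), so the sketch sticks to plain functions rather than mirroring its API.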