Fix out of range regression...
Matt Mackall
r655:b3bba126 default
@@ -1,544 +1,543 @@
# revlog.py - storage back-end for mercurial
#
# This provides efficient delta storage with O(1) retrieve and append
# and O(changes) merge between branches
#
# Copyright 2005 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

import zlib, struct, sha, binascii, heapq
from mercurial import mdiff

def hex(node): return binascii.hexlify(node)
def bin(node): return binascii.unhexlify(node)
def short(node): return hex(node[:6])

def compress(text):
    if not text: return text
    if len(text) < 44:
        if text[0] == '\0': return text
        return 'u' + text
    bin = zlib.compress(text)
    if len(bin) > len(text):
        if text[0] == '\0': return text
        return 'u' + text
    return bin

def decompress(bin):
    if not bin: return bin
    t = bin[0]
    if t == '\0': return bin
    if t == 'x': return zlib.decompress(bin)
    if t == 'u': return bin[1:]
    raise "unknown compression type %s" % t

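# Aside (illustration, not part of revlog.py): a minimal sketch of the
# tagging scheme above, in the Python 2 the module assumes. compress()
# marks short or incompressible input with a 'u' prefix and stores text
# starting with NUL verbatim; zlib output begins with 'x' under default
# settings, which is what decompress() keys on. The demo name is made up.

def _compression_demo():
    sample = "some text " * 10
    assert decompress(compress(sample)) == sample   # zlib round-trip
    assert compress("tiny") == "utiny"              # < 44 bytes: literal
    assert compress("\0binary") == "\0binary"       # NUL-led: untouched
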
def hash(text, p1, p2):
    l = [p1, p2]
    l.sort()
    s = sha.new(l[0])
    s.update(l[1])
    s.update(text)
    return s.digest()

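# Aside (illustration, not part of revlog.py): because hash() sorts the
# parents before digesting, a node id does not depend on parent order,
# so a merge hashes identically from either side. The demo name is made up.

def _hash_demo():
    t, pa, pb = "text", "\1" * 20, "\2" * 20
    assert hash(t, pa, pb) == hash(t, pb, pa)
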
nullid = "\0" * 20
indexformat = ">4l20s20s20s"

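# Aside (illustration, not part of revlog.py): indexformat describes one
# fixed-width, big-endian index record: four 32-bit fields (offset, size,
# base, linkrev) followed by three 20-byte nodes (p1, p2, nodeid), 76
# bytes in all. A sketch with a made-up record:

def _indexformat_demo():
    assert struct.calcsize(indexformat) == 4 * 4 + 3 * 20  # 76 bytes
    record = struct.pack(indexformat, 0, 10, 0, 0,
                         nullid, nullid, "\1" * 20)
    offset, size, base, linkrev, p1, p2, nodeid = \
        struct.unpack(indexformat, record)
    assert (offset, size, nodeid) == (0, 10, "\1" * 20)
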
class lazyparser:
    def __init__(self, data, revlog):
        self.data = data
        self.s = struct.calcsize(indexformat)
        self.l = len(data)/self.s
        self.index = [None] * self.l
        self.map = {nullid: -1}
        self.all = 0
        self.revlog = revlog

    def load(self, pos=None):
        if self.all: return
        if pos is not None:
            block = pos / 1000
            i = block * 1000
            end = min(self.l, i + 1000)
        else:
            self.all = 1
            i = 0
            end = self.l
            self.revlog.index = self.index
            self.revlog.nodemap = self.map

        while i < end:
            d = self.data[i * self.s: (i + 1) * self.s]
            e = struct.unpack(indexformat, d)
            self.index[i] = e
            self.map[e[6]] = i
            i += 1

class lazyindex:
    def __init__(self, parser):
        self.p = parser
    def __len__(self):
        return len(self.p.index)
    def load(self, pos):
        self.p.load(pos)
        return self.p.index[pos]
    def __getitem__(self, pos):
        return self.p.index[pos] or self.load(pos)
    def append(self, e):
        self.p.index.append(e)

class lazymap:
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        if self.p.all: return
        n = self.p.data.find(key)
        if n < 0: raise KeyError("node " + hex(key))
        pos = n / self.p.s
        self.p.load(pos)
    def __contains__(self, key):
        self.p.load()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            try:
                yield self.p.index[i][6]
            except:
                self.p.load(i)
                yield self.p.index[i][6]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val

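# Aside (illustration, not part of revlog.py): lazymap resolves a node
# without parsing the whole index. It scans the raw index data for the
# 20-byte id, converts the byte offset of the hit into a record number,
# and has lazyparser load just that block of 1000 entries. A sketch of
# the arithmetic, with a made-up helper name:

def _lazy_block(n, s=76):
    # for a match at byte n of the index (record size s), return the
    # record number and the first entry of the block load() would parse
    pos = n / s
    return pos, (pos / 1000) * 1000
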
class revlog:
    def __init__(self, opener, indexfile, datafile):
        self.indexfile = indexfile
        self.datafile = datafile
        self.opener = opener
        self.cache = None

        try:
            i = self.opener(self.indexfile).read()
        except IOError:
            i = ""

        if len(i) > 10000:
            # big index, let's parse it on demand
            parser = lazyparser(i, self)
            self.index = lazyindex(parser)
            self.nodemap = lazymap(parser)
        else:
            s = struct.calcsize(indexformat)
            l = len(i) / s
            self.index = [None] * l
            m = [None] * l

            n = 0
            for f in xrange(0, len(i), s):
                # offset, size, base, linkrev, p1, p2, nodeid
                e = struct.unpack(indexformat, i[f:f + s])
                m[n] = (e[6], n)
                self.index[n] = e
                n += 1

            self.nodemap = dict(m)
            self.nodemap[nullid] = -1

    def tip(self): return self.node(len(self.index) - 1)
    def count(self): return len(self.index)
    def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
    def rev(self, node): return self.nodemap[node]
    def linkrev(self, node): return self.index[self.nodemap[node]][3]
    def parents(self, node):
        if node == nullid: return (nullid, nullid)
        return self.index[self.nodemap[node]][4:6]

    def start(self, rev): return self.index[rev][0]
    def length(self, rev): return self.index[rev][1]
    def end(self, rev): return self.start(rev) + self.length(rev)
    def base(self, rev): return self.index[rev][2]

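    # Aside (illustration, not part of revlog.py): base(rev) names the
    # last full snapshot at or before rev, so the bytes needed to
    # reconstruct rev form one contiguous span of the data file:
    #
    #   span = (self.start(self.base(rev)), self.end(rev))
    #
    # revision() below reads exactly this span before patching.
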
    def heads(self):
        p = {}
        h = []
        for r in range(self.count() - 1, -1, -1):
            n = self.node(r)
            if n not in p:
                h.append(n)
            for pn in self.parents(n):
                p[pn] = 1
        return h

    def children(self, node):
        c = []
        p = self.rev(node)
        for r in range(p + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn == p:
                    c.append(p)
                    continue
                elif pn == nullid:
                    continue
        return c

    def lookup(self, id):
        try:
            rev = int(id)
            if str(rev) != id: raise ValueError
            if rev < 0: rev = self.count() + rev
            if rev < 0 or rev >= self.count(): raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            c = []
            for n in self.nodemap:
                if hex(n).startswith(id):
                    c.append(n)
            if len(c) > 1: raise KeyError("Ambiguous identifier")
            if len(c) < 1: raise KeyError("No match found")
            return c[0]

        return None

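    # Aside (illustration, not part of revlog.py): lookup() accepts a
    # decimal revision number (negative counts back from the tip) or a
    # hex prefix of a node id. A hypothetical session:
    #
    #   log.lookup("0")        # node of the first revision
    #   log.lookup("-1")       # node of the tip
    #   log.lookup("b3bba12")  # unambiguous hex prefix -> full node
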
    def diff(self, a, b):
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        return mdiff.patches(t, pl)

    def delta(self, node):
        r = self.rev(node)
        b = self.base(r)
        if r == b:
            return self.diff(self.revision(self.node(r - 1)),
                             self.revision(node))
        else:
            f = self.opener(self.datafile)
            f.seek(self.start(r))
            data = f.read(self.length(r))
            return decompress(data)

    def revision(self, node):
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        text = None
        rev = self.rev(node)
        start, length, base, link, p1, p2, node = self.index[rev]
        end = start + length
        if base != rev: start = self.start(base)

        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            start = self.start(base + 1)
            text = self.cache[2]
            last = 0

        f = self.opener(self.datafile)
        f.seek(start)
        data = f.read(end - start)

        if text is None:
            last = self.length(base)
            text = decompress(data[:last])

        bins = []
        for r in xrange(base + 1, rev + 1):
            s = self.length(r)
            bins.append(decompress(data[last:last + s]))
            last = last + s

        text = mdiff.patches(text, bins)

        if node != hash(text, p1, p2):
            raise IOError("integrity check failed on %s:%d"
                          % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, text)
            data = compress(d)
            dist = end - start + len(data)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        e = (offset, len(data), base, link, p1, p2, node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(indexformat, *e)

        transaction.add(self.datafile, e[0])
        self.opener(self.datafile, "a").write(data)
        transaction.add(self.indexfile, n * len(entry))
        self.opener(self.indexfile, "a").write(entry)

        self.cache = (node, n, text)
        return node

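    # Aside (illustration, not part of revlog.py): the snapshot rule in
    # addrevision() above bounds reconstruction cost. dist counts the
    # bytes from the start of the current snapshot through the new
    # delta; once it exceeds twice the uncompressed text, a full version
    # is stored instead. Worked example with made-up sizes: for a
    # 1000-byte text whose chain already spans 1900 bytes, a 150-byte
    # delta gives dist = 2050 > 2000, so this revision becomes a new
    # snapshot and later reads touch at most about twice the text size.
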
    def ancestor(self, a, b):
        # calculate the distance of every node from root
        dist = {nullid: 0}
        for i in xrange(self.count()):
            n = self.node(i)
            p1, p2 = self.parents(n)
            dist[n] = max(dist[p1], dist[p2]) + 1

        # traverse ancestors in order of decreasing distance from root
        def ancestors(node):
            # we store negative distances because heap returns smallest member
            h = [(-dist[node], node)]
            seen = {}
            earliest = self.count()
            while h:
                d, n = heapq.heappop(h)
                if n not in seen:
                    seen[n] = 1
                    r = self.rev(n)
                    yield (-d, r, n)
                    for p in self.parents(n):
                        heapq.heappush(h, (-dist[p], p))

        x = ancestors(a)
        y = ancestors(b)
        lx = x.next()
        ly = y.next()

        # increment each ancestor list until it is closer to root than
        # the other, or they match
        while 1:
            if lx == ly:
                return lx[2]
            elif lx < ly:
                ly = y.next()
            elif lx > ly:
                lx = x.next()

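    # Aside (illustration, not part of revlog.py): heapq is a min-heap,
    # so ancestor() stores (-distance, node) pairs to pop the deepest
    # ancestor first:
    #
    #   h = []
    #   for d, n in [(3, "a"), (7, "b"), (5, "c")]:
    #       heapq.heappush(h, (-d, n))
    #   heapq.heappop(h)  # -> (-7, "b"): largest distance first
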
    def group(self, linkmap):
        # given a list of changeset revs, return a set of deltas and
        # metadata corresponding to nodes. the first delta is
        # parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
        # have this parent as it has all history before these
        # changesets. parent is parent[0]

        revs = []
        needed = {}

        # find file nodes/revs that match changeset revs
        for i in xrange(0, self.count()):
            if self.index[i][3] in linkmap:
                revs.append(i)
                needed[i] = 1

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield struct.pack(">l", 0)
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # for each delta that isn't contiguous in the log, we need to
        # reconstruct the base, reconstruct the result, and then
        # calculate the delta. We also need to do this where we've
        # stored a full version and not a delta
        for i in xrange(0, len(revs) - 1):
            a, b = revs[i], revs[i + 1]
            if a + 1 != b or self.base(b) == b:
                for j in xrange(self.base(a), a + 1):
                    needed[j] = 1
                for j in xrange(self.base(b), b + 1):
                    needed[j] = 1

        # calculate spans to retrieve from datafile
        needed = needed.keys()
        needed.sort()
        spans = []
        oo = -1
        ol = 0
        for n in needed:
            if n < 0: continue
            o = self.start(n)
            l = self.length(n)
            if oo + ol == o: # can we merge with the previous?
                nl = spans[-1][2]
                nl.append((n, l))
                ol += l
                spans[-1] = (oo, ol, nl)
            else:
                oo = o
                ol = l
                spans.append((oo, ol, [(n, l)]))

        # read spans in, divide up chunks
        chunks = {}
        for span in spans:
            # we reopen the file for each span to make http happy for now
            f = self.opener(self.datafile)
            f.seek(span[0])
            data = f.read(span[1])

            # divide up the span
            pos = 0
            for r, l in span[2]:
                chunks[r] = decompress(data[pos: pos + l])
                pos += l

        # helper to reconstruct intermediate versions
        def construct(text, base, rev):
            bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
            return mdiff.patches(text, bins)

        # build deltas
        deltas = []
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            n = self.node(b)

            # do we need to construct a new delta?
            if a + 1 != b or self.base(b) == b:
                if a >= 0:
                    base = self.base(a)
                    ta = chunks[self.base(a)]
                    ta = construct(ta, base, a)
                else:
                    ta = ""

                base = self.base(b)
                if a > base:
                    base = a
                    tb = ta
                else:
                    tb = chunks[self.base(b)]
                tb = construct(tb, base, b)
                d = self.diff(ta, tb)
            else:
                d = chunks[b]

            p = self.parents(n)
            meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
            l = struct.pack(">l", len(meta) + len(d) + 4)
            yield l
            yield meta
            yield d

        yield struct.pack(">l", 0)

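    # Aside (illustration, not part of revlog.py): each chunk group()
    # yields is framed as a 4-byte big-endian length that counts itself,
    # an 80-byte header (node, p1, p2, changeset link) and the delta; a
    # zero length terminates the stream. A sketch of the matching
    # reader, assuming a read(n) that returns exactly n bytes:
    #
    #   while 1:
    #       l = struct.unpack(">l", read(4))[0]
    #       if not l: break
    #       chunk = read(l - 4)
    #       node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
    #       delta = chunk[80:]
    #
    # addgroup() below consumes chunks of exactly this shape.
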
    def addgroup(self, revs, linkmapper, transaction, unique = 0):
        # given a set of deltas, add them to the revision log. the
        # first delta is against its parent, which should be in our
        # log, the rest are against the previous delta.

        # track the base of the current delta log
        r = self.count()
        t = r - 1
        node = nullid

-       base = -1
+       base = prev = -1
        start = end = measure = 0
        if r:
            start = self.start(self.base(t))
            end = self.end(t)
            measure = self.length(self.base(t))
            base = self.base(t)
            prev = self.tip()

        transaction.add(self.datafile, end)
        transaction.add(self.indexfile, r * struct.calcsize(indexformat))
        dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a")

        # loop through our set of deltas
        chain = None
-       prev = self.tip()
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                if unique:
                    raise "already have %s" % hex(node[:4])
                chain = node
                continue
            delta = chunk[80:]

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise "unknown base %s" % short(chain[:4])

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                cdelta = compress(delta)

            if chain != prev or (end - start + len(cdelta)) > measure * 2:
                # flush our writes here so we can read it in revision
                dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise "consistency error adding group"
                measure = len(text)
            else:
                e = (end, len(cdelta), self.base(t), link, p1, p2, node)
                self.index.append(e)
                self.nodemap[node] = r
                dfh.write(cdelta)
                ifh.write(struct.pack(indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            start = self.start(self.base(t))
            end = self.end(t)

        dfh.close()
        ifh.close()
        return node