Change revlog to use new patch code

mpm@selenic.com
r73:ee1cbe84 default
@@ -1,409 +1,421 @@
 # revlog.py - storage back-end for mercurial
 #
 # This provides efficient delta storage with O(1) retrieve and append
 # and O(changes) merge between branches
 #
 # Copyright 2005 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.

 import zlib, struct, sha, os, tempfile, binascii
 from mercurial import mdiff

 def hex(node): return binascii.hexlify(node)
 def bin(node): return binascii.unhexlify(node)

 def compress(text):
     return zlib.compress(text)

 def decompress(bin):
     return zlib.decompress(bin)

 def hash(text, p1, p2):
     l = [p1, p2]
     l.sort()
     return sha.sha(l[0] + l[1] + text).digest()

 nullid = "\0" * 20
 indexformat = ">4l20s20s20s"

 class revlog:
     def __init__(self, opener, indexfile, datafile):
         self.indexfile = indexfile
         self.datafile = datafile
         self.index = []
         self.opener = opener
         self.cache = None
-        self.nodemap = {nullid: -1}
         # read the whole index for now, handle on-demand later
         try:
             n = 0
             i = self.opener(self.indexfile).read()
             s = struct.calcsize(indexformat)
+
+            # preallocate arrays
+            l = len(i)/s
+            self.index = [None] * l
+            m = [None] * l
+
             for f in xrange(0, len(i), s):
                 # offset, size, base, linkrev, p1, p2, nodeid
                 e = struct.unpack(indexformat, i[f:f + s])
-                self.nodemap[e[6]] = n
-                self.index.append(e)
+                self.index[n] = e
+                m[n] = (e[6], n)
                 n += 1
-        except IOError: pass
+
+            self.nodemap = dict(m)
+        except IOError:
+            self.nodemap = {}
+        self.nodemap[nullid] = -1

     def tip(self): return self.node(len(self.index) - 1)
     def count(self): return len(self.index)
     def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
     def rev(self, node): return self.nodemap[node]
     def linkrev(self, node): return self.index[self.nodemap[node]][3]
     def parents(self, node):
         if node == nullid: return (nullid, nullid)
         return self.index[self.nodemap[node]][4:6]

     def start(self, rev): return self.index[rev][0]
     def length(self, rev): return self.index[rev][1]
     def end(self, rev): return self.start(rev) + self.length(rev)
     def base(self, rev): return self.index[rev][2]

     def lookup(self, id):
         try:
             rev = int(id)
             return self.node(rev)
         except ValueError:
             c = []
             for n in self.nodemap:
                 if id in hex(n):
                     c.append(n)
             if len(c) > 1: raise KeyError("Ambiguous identifier")
             if len(c) < 1: raise KeyError("No match found")
             return c[0]

         return None

     def revisions(self, list):
         # this can be optimized to do spans, etc
         # be stupid for now
         for node in list:
             yield self.revision(node)

     def diff(self, a, b):
         return mdiff.textdiff(a, b)

+    def patches(self, t, pl):
+        return mdiff.patches(t, pl)
+
     def revision(self, node):
         if node == nullid: return ""
         if self.cache and self.cache[0] == node: return self.cache[2]

         text = None
         rev = self.rev(node)
         base = self.base(rev)
         start = self.start(base)
         end = self.end(rev)

         if self.cache and self.cache[1] >= base and self.cache[1] < rev:
             base = self.cache[1]
             start = self.start(base + 1)
             text = self.cache[2]
             last = 0

         f = self.opener(self.datafile)
         f.seek(start)
         data = f.read(end - start)

         if not text:
             last = self.length(base)
             text = decompress(data[:last])

         bins = []
         for r in xrange(base + 1, rev + 1):
             s = self.length(r)
             bins.append(decompress(data[last:last + s]))
             last = last + s

         text = mdiff.patches(text, bins)

         (p1, p2) = self.parents(node)
         if node != hash(text, p1, p2):
             raise "integrity check failed on %s:%d" % (self.datafile, rev)

         self.cache = (node, rev, text)
         return text

     def addrevision(self, text, transaction, link, p1=None, p2=None):
         if text is None: text = ""
         if p1 is None: p1 = self.tip()
         if p2 is None: p2 = nullid

         node = hash(text, p1, p2)

         n = self.count()
         t = n - 1

         if n:
             base = self.base(t)
             start = self.start(base)
             end = self.end(t)
             prev = self.revision(self.tip())
             data = compress(self.diff(prev, text))
             dist = end - start + len(data)

         # full versions are inserted when the needed deltas
         # become comparable to the uncompressed text
         if not n or dist > len(text) * 2:
             data = compress(text)
             base = n
         else:
             base = self.base(t)

         offset = 0
         if t >= 0:
             offset = self.end(t)

         e = (offset, len(data), base, link, p1, p2, node)

         self.index.append(e)
         self.nodemap[node] = n
         entry = struct.pack(indexformat, *e)

         transaction.add(self.datafile, e[0])
         self.opener(self.datafile, "a").write(data)
         transaction.add(self.indexfile, n * len(entry))
         self.opener(self.indexfile, "a").write(entry)

         self.cache = (node, n, text)
         return node

     def ancestor(self, a, b):
         def expand(list, map):
             a = []
             while list:
                 n = list.pop(0)
                 map[n] = 1
                 yield n
                 for p in self.parents(n):
                     if p != nullid and p not in map:
                         list.append(p)
             yield nullid

         amap = {}
         bmap = {}
         ag = expand([a], amap)
         bg = expand([b], bmap)
         adone = bdone = 0

         while not adone or not bdone:
             if not adone:
                 an = ag.next()
                 if an == nullid:
                     adone = 1
                 elif an in bmap:
                     return an
             if not bdone:
                 bn = bg.next()
                 if bn == nullid:
                     bdone = 1
                 elif bn in amap:
                     return bn

         return nullid

     def mergedag(self, other, transaction, linkseq, accumulate = None):
         """combine the nodes from other's DAG into ours"""
         old = self.tip()
         i = self.count()
         l = []

         # merge the other revision log into our DAG
         for r in range(other.count()):
             id = other.node(r)
             if id not in self.nodemap:
                 (xn, yn) = other.parents(id)
                 l.append((id, xn, yn))
                 self.nodemap[id] = i
                 i += 1

         # merge node date for new nodes
         r = other.revisions([e[0] for e in l])
         for e in l:
             t = r.next()
             if accumulate: accumulate(t)
             self.addrevision(t, transaction, linkseq.next(), e[1], e[2])

         # return the unmerged heads for later resolving
         return (old, self.tip())

     def group(self, linkmap):
         # given a list of changeset revs, return a set of deltas and
         # metadata corresponding to nodes the first delta is
         # parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
         # have this parent as it has all history before these
         # changesets. parent is parent[0]

         revs = []
         needed = {}

         # find file nodes/revs that match changeset revs
         for i in xrange(0, self.count()):
             if self.index[i][3] in linkmap:
                 revs.append(i)
                 needed[i] = 1

         # if we don't have any revisions touched by these changesets, bail
         if not revs: return struct.pack(">l", 0)

         # add the parent of the first rev
         p = self.parents(self.node(revs[0]))[0]
         revs.insert(0, self.rev(p))

         # for each delta that isn't contiguous in the log, we need to
         # reconstruct the base, reconstruct the result, and then
         # calculate the delta. We also need to do this where we've
         # stored a full version and not a delta
         for i in xrange(0, len(revs) - 1):
             a, b = revs[i], revs[i + 1]
             if a + 1 != b or self.base(b) == b:
                 for j in xrange(self.base(a), a + 1):
                     needed[j] = 1
                 for j in xrange(self.base(b), b + 1):
                     needed[j] = 1

         # calculate spans to retrieve from datafile
         needed = needed.keys()
         needed.sort()
         spans = []
         for n in needed:
             if n < 0: continue
             o = self.start(n)
             l = self.length(n)
             spans.append((o, l, [(n, l)]))

         # merge spans
         merge = [spans.pop(0)]
         while spans:
             e = spans.pop(0)
             f = merge[-1]
             if e[0] == f[0] + f[1]:
                 merge[-1] = (f[0], f[1] + e[1], f[2] + e[2])
             else:
                 merge.append(e)

         # read spans in, divide up chunks
         chunks = {}
         for span in merge:
             # we reopen the file for each span to make http happy for now
             f = self.opener(self.datafile)
             f.seek(span[0])
             data = f.read(span[1])

             # divide up the span
             pos = 0
             for r, l in span[2]:
                 chunks[r] = data[pos: pos + l]
                 pos += l

         # helper to reconstruct intermediate versions
         def construct(text, base, rev):
             bins = [decompress(chunks[r]) for r in xrange(base + 1, rev + 1)]
             return mdiff.patches(text, bins)

         # build deltas
         deltas = []
         for d in xrange(0, len(revs) - 1):
             a, b = revs[d], revs[d + 1]
             n = self.node(b)

             if a + 1 != b or self.base(b) == b:
                 if a >= 0:
                     base = self.base(a)
                     ta = decompress(chunks[self.base(a)])
                     ta = construct(ta, base, a)
                 else:
                     ta = ""

                 base = self.base(b)
                 if a > base:
                     base = a
                     tb = ta
                 else:
                     tb = decompress(chunks[self.base(b)])
                 tb = construct(tb, base, b)
                 d = self.diff(ta, tb)
             else:
                 d = decompress(chunks[b])

             p = self.parents(n)
             meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
             l = struct.pack(">l", len(meta) + len(d) + 4)
             deltas.append(l + meta + d)

         l = struct.pack(">l", sum(map(len, deltas)) + 4)
         deltas.insert(0, l)
         return "".join(deltas)

     def addgroup(self, data, linkmapper, transaction):
         # given a set of deltas, add them to the revision log. the
         # first delta is against its parent, which should be in our
         # log, the rest are against the previous delta.

         if not data: return self.tip()

         # retrieve the parent revision of the delta chain
         chain = data[24:44]

         # track the base of the current delta log
         r = self.count()
         t = r - 1

         base = prev = -1
         start = end = 0
         if r:
             start = self.start(self.base(t))
             end = self.end(t)
             measure = self.length(self.base(t))
             base = self.base(t)
             prev = self.tip()

         transaction.add(self.datafile, end)
         transaction.add(self.indexfile, r * struct.calcsize(indexformat))
         dfh = self.opener(self.datafile, "a")
         ifh = self.opener(self.indexfile, "a")

         # loop through our set of deltas
         pos = 0
         while pos < len(data):
             l, node, p1, p2, cs = struct.unpack(">l20s20s20s20s",
                                                 data[pos:pos+84])
             link = linkmapper(cs)
             delta = data[pos + 84:pos + l]
             pos += l

             # full versions are inserted when the needed deltas become
             # comparable to the uncompressed text or when the previous
             # version is not the one we have a delta against. We use
             # the size of the previous full rev as a proxy for the
             # current size.

             if chain == prev:
                 cdelta = compress(delta)

             if chain != prev or (end - start + len(cdelta)) > measure * 2:
                 # flush our writes here so we can read it in revision
                 dfh.flush()
                 ifh.flush()
                 text = self.revision(chain)
-                text = self.patch(text, delta)
+                text = self.patches(text, [delta])
                 chk = self.addrevision(text, transaction, link, p1, p2)
                 if chk != node:
                     raise "consistency error adding group"
                 measure = len(text)
             else:
                 e = (end, len(cdelta), self.base(t), link, p1, p2, node)
                 self.index.append(e)
                 self.nodemap[node] = r
                 dfh.write(cdelta)
                 ifh.write(struct.pack(indexformat, *e))

             t, r, chain, prev = r, r + 1, node, node
             start = self.start(self.base(t))
             end = self.end(t)

         dfh.close()
         ifh.close()
         return node
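
The substantive change is at the bottom of addgroup: self.patch(text, delta), a method this file never defined, becomes self.patches(text, [delta]), routing through the new patches wrapper so that one code path, mdiff.patches applied to a list, handles both a single delta and a whole chain, exactly as revision already does with bins. A minimal Python 3 sketch of those list-folding semantics; the hunk layout used here (a >lll start/end/length header followed by replacement bytes) is my assumption about the bdiff-style format, not a copy of mdiff:

import struct

def apply_delta(text, delta):
    # assumed hunk layout: >lll header = (start, end, len of new data),
    # then the data; each hunk replaces text[start:end], ordered by start
    out, pos, last = [], 0, 0
    while pos < len(delta):
        start, end, l = struct.unpack(">lll", delta[pos:pos + 12])
        out.append(text[last:start])
        out.append(delta[pos + 12:pos + 12 + l])
        last = end
        pos += 12 + l
    out.append(text[last:])
    return b"".join(out)

def patches(text, deltas):
    # fold a whole chain of deltas, oldest first, as revision() does
    for d in deltas:
        text = apply_delta(text, d)
    return text

base = b"one\ntwo\nthree\n"
d1 = struct.pack(">lll", 4, 8, 4) + b"TWO\n"   # replace "two\n"
assert patches(base, [d1]) == b"one\nTWO\nthree\n"

A list interface also leaves room for an implementation to compose hunks without materializing every intermediate version, which is the sort of optimization the single-delta patch() call rules out.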
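hash defines node identity for everything stored here: the SHA-1 of the two sorted parent ids plus the text, so an id commits to both content and history while being independent of parent order. A sketch in Python 3 terms (the file itself targets Python 2's sha module):

import hashlib

nullid = b"\0" * 20

def hash_node(text, p1, p2):
    # sort the parents so the id is independent of parent order
    a, b = sorted([p1, p2])
    return hashlib.sha1(a + b + text).digest()

# same id regardless of which parent comes first
h1 = hash_node(b"hello\n", nullid, b"\x01" * 20)
h2 = hash_node(b"hello\n", b"\x01" * 20, nullid)
assert h1 == h2 and len(h1) == 20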
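indexformat = ">4l20s20s20s" fixes each index entry at 76 bytes: four big-endian 32-bit fields (offset, size, base, linkrev) followed by three 20-byte ids (p1, p2, nodeid). That fixed size is what lets __init__ slice the index file at struct.calcsize boundaries. A quick round-trip check with hypothetical values:

import struct

indexformat = ">4l20s20s20s"
assert struct.calcsize(indexformat) == 76  # 4*4 + 3*20 bytes

# offset, size, base, linkrev, p1, p2, nodeid (made-up values)
e = (0, 42, 0, 7, b"\0" * 20, b"\0" * 20, b"\x12" * 20)
record = struct.pack(indexformat, *e)
assert struct.unpack(indexformat, record) == e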
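The other half of this changeset reworks index loading: instead of growing self.index with append and inserting into self.nodemap one entry at a time, it preallocates both arrays and builds the nodemap with a single dict(m) call, and the nullid: -1 sentinel now goes in after the try/except so both the success and empty-repo paths get it. A standalone Python 3 sketch of the new shape (// stands in for Python 2's integer /):

import struct

indexformat = ">4l20s20s20s"

def parse_index(data):
    # preallocate, then fill; one dict() call builds the nodemap
    s = struct.calcsize(indexformat)
    l = len(data) // s
    index = [None] * l
    m = [None] * l
    n = 0
    for f in range(0, len(data), s):
        e = struct.unpack(indexformat, data[f:f + s])
        index[n] = e
        m[n] = (e[6], n)
        n += 1
    nodemap = dict(m)
    nodemap[b"\0" * 20] = -1
    return index, nodemap

entry = struct.pack(indexformat, 0, 10, 0, 0, b"\0"*20, b"\0"*20, b"\x07"*20)
index, nodemap = parse_index(entry * 3)
assert nodemap[b"\x07" * 20] == 2  # later entries win, as with sequential inserts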
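addrevision keeps delta chains bounded: dist measures the bytes from the last full version through the new compressed delta, and once that exceeds twice the uncompressed text, a fresh full version is stored and base resets to the new revision. The decision in isolation, with made-up numbers:

def store_full(n, dist, textlen):
    # full version when the log is empty or the delta chain since the
    # last snapshot has grown past 2x the uncompressed text
    return not n or dist > textlen * 2

assert store_full(0, 0, 100)        # first revision: always a full text
assert not store_full(5, 150, 100)  # short chain: keep appending deltas
assert store_full(5, 250, 100)      # chain too long: snapshot again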
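ancestor interleaves two breadth-first expansions, one from each node, marking visited ancestors in amap and bmap; the first node either walk yields that the other side has already seen is a nearest common ancestor. A self-contained sketch over a plain parents dict (a hypothetical stand-in for the revlog DAG), with None playing the role of nullid:

def ancestor(a, b, parents):
    # parents: node -> tuple of parent nodes; None marks the root's parent
    def expand(start, seen):
        queue = [start]
        while queue:
            n = queue.pop(0)
            seen.add(n)
            yield n
            queue.extend(p for p in parents[n]
                         if p is not None and p not in seen)
        while True:
            yield None  # exhausted: keep yielding the sentinel

    amap, bmap = set(), set()
    ag, bg = expand(a, amap), expand(b, bmap)
    while True:
        an, bn = next(ag), next(bg)
        if an is None and bn is None:
            return None
        if an is not None and an in bmap:
            return an
        if bn is not None and bn in amap:
            return bn

# tiny DAG: r branches into x and y
parents = {"r": (None,), "x": ("r",), "y": ("r",)}
assert ancestor("x", "y", parents) == "r"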
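In group, the needed revisions become (offset, length, [(rev, length)]) spans, and adjacent spans are fused so the data file is read in a few large contiguous chunks rather than one seek per revision. The merge step on its own:

def merge_spans(spans):
    # spans: sorted (offset, length, chunks) triples; fuse adjacent ones
    merged = [spans.pop(0)]
    while spans:
        e = spans.pop(0)
        f = merged[-1]
        if e[0] == f[0] + f[1]:  # e starts exactly where f ends
            merged[-1] = (f[0], f[1] + e[1], f[2] + e[2])
        else:
            merged.append(e)
    return merged

spans = [(0, 10, [(0, 10)]), (10, 5, [(1, 5)]), (40, 7, [(2, 7)])]
assert merge_spans(spans) == [(0, 15, [(0, 10), (1, 5)]), (40, 7, [(2, 7)])]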
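Finally, addgroup consumes a self-framing stream: each chunk starts with a 4-byte big-endian total length, then node, p1, p2, and the linked changeset id at 20 bytes apiece (84 header bytes in all), with the delta filling the rest, which is why chain = data[24:44] reads the first chunk's p1. A parser sketch with a synthetic chunk:

import struct

def iterchunks(data):
    # walk the framed stream: >l20s20s20s20s header (84 bytes), delta follows
    pos = 0
    while pos < len(data):
        l, node, p1, p2, cs = struct.unpack(">l20s20s20s20s",
                                            data[pos:pos + 84])
        yield node, p1, p2, cs, data[pos + 84:pos + l]
        pos += l

# one synthetic chunk carrying a 5-byte delta
node, p1, p2, cs = b"\x01" * 20, b"\x02" * 20, b"\x03" * 20, b"\x04" * 20
chunk = struct.pack(">l20s20s20s20s", 84 + 5, node, p1, p2, cs) + b"delta"
assert next(iterchunks(chunk))[4] == b"delta"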