Fix small bug in nodesbetween if heads is [nullid].
Eric Hopper
r1459:106fdec8 default
@@ -1,814 +1,816 @@
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import *
13 from node import *
14 from demandload import demandload
14 from demandload import demandload
15 demandload(globals(), "binascii errno heapq mdiff sha struct zlib")
15 demandload(globals(), "binascii errno heapq mdiff sha struct zlib")
16
16
def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    l = [p1, p2]
    l.sort()
    s = sha.new(l[0])
    s.update(l[1])
    s.update(text)
    return s.digest()

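# Illustrative note (added, not part of the original file): sorting the
# parent hashes first makes the node id independent of parent order:
#
#   >>> hash("data", p1, p2) == hash("data", p2, p1)
#   True
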
def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text: return text
    if len(text) < 44:
        if text[0] == '\0': return text
        return 'u' + text
    bin = zlib.compress(text)
    if len(bin) > len(text):
        if text[0] == '\0': return text
        return 'u' + text
    return bin

def decompress(bin):
    """ decompress the given input """
    if not bin: return bin
    t = bin[0]
    if t == '\0': return bin
    if t == 'x': return zlib.decompress(bin)
    if t == 'u': return bin[1:]
    raise RevlogError("unknown compression type %s" % t)

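# Illustrative round trip (added, not part of the original file): text
# shorter than 44 bytes, or text that zlib fails to shrink, is stored
# literally behind a 'u' marker; zlib output begins with 'x'; data that
# already starts with '\0' passes through untouched.
#
#   >>> decompress(compress("hello"))
#   'hello'
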
indexformat = ">4l20s20s20s"

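# Layout note (added for illustration): each index record packs, in
# big-endian order, four longs and three 20-byte strings:
#   offset, size, base, linkrev, p1, p2, nodeid
# giving a fixed record size of struct.calcsize(indexformat) == 76 bytes.
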
class lazyparser:
    """
    this class avoids the need to parse the entirety of large indices

    By default we parse and load 1000 entries at a time.

    If no position is specified, we load the whole index, and replace
    the lazy objects in revlog with the underlying objects for
    efficiency in cases where we look at most of the nodes.
    """
    def __init__(self, data, revlog):
        self.data = data
        self.s = struct.calcsize(indexformat)
        self.l = len(data)/self.s
        self.index = [None] * self.l
        self.map = {nullid: -1}
        self.all = 0
        self.revlog = revlog

    def load(self, pos=None):
        if self.all: return
        if pos is not None:
            block = pos / 1000
            i = block * 1000
            end = min(self.l, i + 1000)
        else:
            self.all = 1
            i = 0
            end = self.l
            self.revlog.index = self.index
            self.revlog.nodemap = self.map

        while i < end:
            d = self.data[i * self.s: (i + 1) * self.s]
            e = struct.unpack(indexformat, d)
            self.index[i] = e
            self.map[e[6]] = i
            i += 1

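# Behavioural note (added for illustration): load(pos) parses the whole
# 1000-entry block containing pos, so load(2500) fills index entries
# 2000 through 2999 and leaves the rest of the index lazy.
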
class lazyindex:
    """a lazy version of the index array"""
    def __init__(self, parser):
        self.p = parser
    def __len__(self):
        return len(self.p.index)
    def load(self, pos):
        self.p.load(pos)
        return self.p.index[pos]
    def __getitem__(self, pos):
        return self.p.index[pos] or self.load(pos)
    def append(self, e):
        self.p.index.append(e)

class lazymap:
    """a lazy version of the node map"""
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        if self.p.all: return
        n = self.p.data.find(key)
        if n < 0:
            raise KeyError(key)
        pos = n / self.p.s
        self.p.load(pos)
    def __contains__(self, key):
        self.p.load()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            try:
                yield self.p.index[i][6]
            except:
                self.p.load(i)
                yield self.p.index[i][6]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val

class RevlogError(Exception): pass

class revlog:
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile, datafile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = datafile
        self.opener = opener
        self.cache = None

        try:
            i = self.opener(self.indexfile).read()
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            i = ""

        if len(i) > 10000:
            # big index, let's parse it on demand
            parser = lazyparser(i, self)
            self.index = lazyindex(parser)
            self.nodemap = lazymap(parser)
        else:
            s = struct.calcsize(indexformat)
            l = len(i) / s
            self.index = [None] * l
            m = [None] * l

            n = 0
            for f in xrange(0, len(i), s):
                # offset, size, base, linkrev, p1, p2, nodeid
                e = struct.unpack(indexformat, i[f:f + s])
                m[n] = (e[6], n)
                self.index[n] = e
                n += 1

            self.nodemap = dict(m)
            self.nodemap[nullid] = -1

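    # Usage sketch (added, not in the original file): a minimal opener
    # over a plain directory; 'base' and 'diropener' are hypothetical.
    #
    #   import os
    #   def diropener(base):
    #       def o(fname, mode='r'):
    #           return open(os.path.join(base, fname), mode + 'b')
    #       return o
    #   rl = revlog(diropener('/tmp/repo'), 'test.i', 'test.d')
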
    def tip(self): return self.node(len(self.index) - 1)
    def count(self): return len(self.index)
    def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
    def rev(self, node):
        try:
            return self.nodemap[node]
        except KeyError:
            raise RevlogError('%s: no node %s' % (self.indexfile, hex(node)))
    def linkrev(self, node): return self.index[self.rev(node)][3]
    def parents(self, node):
        if node == nullid: return (nullid, nullid)
        return self.index[self.rev(node)][4:6]

    def start(self, rev): return self.index[rev][0]
    def length(self, rev): return self.index[rev][1]
    def end(self, rev): return self.start(rev) + self.length(rev)
    def base(self, rev): return self.index[rev][2]

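    # Invariants implied by the accessors above (added for illustration):
    #
    #   >>> rl.tip() == rl.node(rl.count() - 1)
    #   True
    #   >>> rl.end(r) == rl.start(r) + rl.length(r)
    #   True
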
    def reachable(self, rev, stop=None):
        reachable = {}
        visit = [rev]
        reachable[rev] = 1
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable[p] = 1
                    visit.append(p)
        return reachable

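    # Illustrative example (added): for a linear history n0 <- n1 <- n2,
    # reachable(n2) returns {n2: 1, n1: 1, n0: 1}; with stop=n1 the walk
    # halts at n1 and n0 is excluded.
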
    def nodesbetween(self, roots=None, heads=None):
        """Return a tuple containing three elements. Elements 1 and 2 contain
        a final list of bases and heads after all the unreachable ones have
        been pruned.  Element 0 contains a topologically sorted list of all
        nodes that satisfy these constraints:
        1. All nodes must be descended from a node in roots (the nodes on
           roots are considered descended from themselves).
        2. All nodes must also be ancestors of a node in heads (the nodes in
           heads are considered to be their own ancestors).

        If roots is unspecified, nullid is assumed as the only root.
        If heads is unspecified, it is taken to be the output of the
        heads method (i.e. a list of all nodes in the repository that
        have no children)."""
        if roots is not None:
            roots = list(roots)
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = -1
        if (lowestrev == -1) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in xrange(0, self.count())],
                    [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = self.count() - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            ancestors = {}
            # Start at the top and keep marking parents until we're done.
            nodestotag = list(heads)
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, 0)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors[n] = 1 # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.extend([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return ([], [], [])
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > -1:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots? Return empty list
                    return ([], [], [])
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = -1
                roots = [nullid]
        # Transform our roots list into a 'set' (i.e. a dictionary where the
        # values don't matter).
        descendents = dict.fromkeys(roots, 1)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == -1: # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.pop(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents. (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents[n] = 1
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = roots.keys()
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

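    # Behavioural note (added, describing this changeset's fix): when
    # heads is [nullid], nothing is ever tagged as an ancestor, so the
    # new "if not ancestors" check returns early instead of tripping the
    # asserts at the end:
    #
    #   >>> rl.nodesbetween(heads=[nullid])
    #   ([], [], [])
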
    def heads(self, stop=None):
        """return the list of all nodes that have no children"""
        p = {}
        h = []
        stoprev = 0
        if stop and stop in self.nodemap:
            stoprev = self.rev(stop)

        for r in range(self.count() - 1, -1, -1):
            n = self.node(r)
            if n not in p:
                h.append(n)
            if n == stop:
                break
            if r < stoprev:
                break
            for pn in self.parents(n):
                p[pn] = 1
        return h

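    # Illustrative example (added): on a strictly linear history only the
    # newest node lacks children, so heads() returns [rl.tip()].
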
    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn == node:
                    c.append(n)
                    continue
                elif pn == nullid:
                    continue
        return c

    def lookup(self, id):
        """locate a node based on revision number or subset of hex nodeid"""
        try:
            rev = int(id)
            if str(rev) != id: raise ValueError
            if rev < 0: rev = self.count() + rev
            if rev < 0 or rev >= self.count(): raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            c = []
            for n in self.nodemap:
                if hex(n).startswith(id):
                    c.append(n)
            if len(c) > 1: raise KeyError("Ambiguous identifier")
            if len(c) < 1: raise KeyError("No match found")
            return c[0]

        return None

    def diff(self, a, b):
        """return a delta between two revisions"""
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        """apply a list of patches to a string"""
        return mdiff.patches(t, pl)

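    # Illustrative invariant (added): a delta produced by diff() can be
    # replayed by patches() to recover the target text:
    #
    #   >>> rl.patches(a, [rl.diff(a, b)]) == b
    #   True
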
    def delta(self, node):
        """return or calculate a delta between a node and its predecessor"""
        r = self.rev(node)
        b = self.base(r)
        if r == b:
            return self.diff(self.revision(self.node(r - 1)),
                             self.revision(node))
        else:
            f = self.opener(self.datafile)
            f.seek(self.start(r))
            data = f.read(self.length(r))
            return decompress(data)

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        start, length, base, link, p1, p2, node = self.index[rev]
        end = start + length
        if base != rev: start = self.start(base)

        # do we have useful data cached?
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            start = self.start(base + 1)
            text = self.cache[2]
            last = 0

        f = self.opener(self.datafile)
        f.seek(start)
        data = f.read(end - start)

        if text is None:
            last = self.length(base)
            text = decompress(data[:last])

        bins = []
        for r in xrange(base + 1, rev + 1):
            s = self.length(r)
            bins.append(decompress(data[last:last + s]))
            last = last + s

        text = mdiff.patches(text, bins)

        if node != hash(text, p1, p2):
            raise RevlogError("integrity check failed on %s:%d"
                              % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text

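    # Reconstruction sketch (added): for revision r with base b, the data
    # file holds the full (possibly compressed) text of b followed by the
    # deltas for b+1 .. r, so revision() decompresses the base and applies
    # each delta in order via mdiff.patches.
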
    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, text)
            data = compress(d)
            dist = end - start + len(data)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        e = (offset, len(data), base, link, p1, p2, node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(indexformat, *e)

        transaction.add(self.datafile, e[0])
        self.opener(self.datafile, "a").write(data)
        transaction.add(self.indexfile, n * len(entry))
        self.opener(self.indexfile, "a").write(entry)

        self.cache = (node, n, text)
        return node

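    # Usage sketch (added; 'tr' stands for a transaction object exposing
    # the add(file, offset) method used above):
    #
    #   n1 = rl.addrevision("first version", tr, link=0)
    #   n2 = rl.addrevision("second version", tr, link=1, p1=n1)
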
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""
        # calculate the distance of every node from root
        dist = {nullid: 0}
        for i in xrange(self.count()):
            n = self.node(i)
            p1, p2 = self.parents(n)
            dist[n] = max(dist[p1], dist[p2]) + 1

        # traverse ancestors in order of decreasing distance from root
        def ancestors(node):
            # we store negative distances because heap returns smallest member
            h = [(-dist[node], node)]
            seen = {}
            earliest = self.count()
            while h:
                d, n = heapq.heappop(h)
                if n not in seen:
                    seen[n] = 1
                    r = self.rev(n)
                    yield (-d, n)
                    for p in self.parents(n):
                        heapq.heappush(h, (-dist[p], p))

        def generations(node):
            sg, s = None, {}
            for g, n in ancestors(node):
                if g != sg:
                    if sg:
                        yield sg, s
                    sg, s = g, {n: 1}
                else:
                    s[n] = 1
            yield sg, s

        x = generations(a)
        y = generations(b)
        gx = x.next()
        gy = y.next()

        # increment each ancestor list until it is closer to root than
        # the other, or they match
        while 1:
            #print "ancestor gen %s %s" % (gx[0], gy[0])
            if gx[0] == gy[0]:
                # find the intersection
                i = [n for n in gx[1] if n in gy[1]]
                if i:
                    return i[0]
                else:
                    #print "next"
                    gy = y.next()
                    gx = x.next()
            elif gx[0] < gy[0]:
                #print "next y"
                gy = y.next()
            else:
                #print "next x"
                gx = x.next()

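    # Illustrative example (added): for two branches
    #   root <- n1 <- n2  and  root <- n1 <- n3
    # ancestor(n2, n3) pops both generation streams until the distances
    # match and returns their intersection, here n1.
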
    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes.  The first delta is
        parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
        have this parent as it has all history before these
        changesets.  parent is parent[0].
        """
        revs = [self.rev(n) for n in nodelist]
        needed = dict.fromkeys(revs, 1)

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield struct.pack(">l", 0)
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # for each delta that isn't contiguous in the log, we need to
        # reconstruct the base, reconstruct the result, and then
        # calculate the delta. We also need to do this where we've
        # stored a full version and not a delta
        for i in xrange(0, len(revs) - 1):
            a, b = revs[i], revs[i + 1]
            if a + 1 != b or self.base(b) == b:
                for j in xrange(self.base(a), a + 1):
                    needed[j] = 1
                for j in xrange(self.base(b), b + 1):
                    needed[j] = 1

        # calculate spans to retrieve from datafile
        needed = needed.keys()
        needed.sort()
        spans = []
        oo = -1
        ol = 0
        for n in needed:
            if n < 0: continue
            o = self.start(n)
            l = self.length(n)
            if oo + ol == o: # can we merge with the previous?
                nl = spans[-1][2]
                nl.append((n, l))
                ol += l
                spans[-1] = (oo, ol, nl)
            else:
                oo = o
                ol = l
                spans.append((oo, ol, [(n, l)]))

        # read spans in, divide up chunks
        chunks = {}
        for span in spans:
            # we reopen the file for each span to make http happy for now
            f = self.opener(self.datafile)
            f.seek(span[0])
            data = f.read(span[1])

            # divide up the span
            pos = 0
            for r, l in span[2]:
                chunks[r] = decompress(data[pos: pos + l])
                pos += l

        # helper to reconstruct intermediate versions
        def construct(text, base, rev):
            bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
            return mdiff.patches(text, bins)

        # build deltas
        deltas = []
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            n = self.node(b)

            if infocollect is not None:
                infocollect(n)

            # do we need to construct a new delta?
            if a + 1 != b or self.base(b) == b:
                if a >= 0:
                    base = self.base(a)
                    ta = chunks[self.base(a)]
                    ta = construct(ta, base, a)
                else:
                    ta = ""

                base = self.base(b)
                if a > base:
                    base = a
                    tb = ta
                else:
                    tb = chunks[self.base(b)]
                tb = construct(tb, base, b)
                d = self.diff(ta, tb)
            else:
                d = chunks[b]

            p = self.parents(n)
            meta = n + p[0] + p[1] + lookup(n)
            l = struct.pack(">l", len(meta) + len(d) + 4)
            yield l
            yield meta
            yield d

        yield struct.pack(">l", 0)

    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        # track the base of the current delta log
        r = self.count()
        t = r - 1
        node = nullid

        base = prev = -1
        start = end = measure = 0
        if r:
            start = self.start(self.base(t))
            end = self.end(t)
            measure = self.length(self.base(t))
            base = self.base(t)
            prev = self.tip()

        transaction.add(self.datafile, end)
        transaction.add(self.indexfile, r * struct.calcsize(indexformat))
        dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #    raise RevlogError("already have %s" % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise RevlogError("unknown base %s" % short(chain[:4]))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                cdelta = compress(delta)

            if chain != prev or (end - start + len(cdelta)) > measure * 2:
                # flush our writes here so we can read it in revision
                dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise RevlogError("consistency error adding group")
                measure = len(text)
            else:
                e = (end, len(cdelta), self.base(t), link, p1, p2, node)
                self.index.append(e)
                self.nodemap[node] = r
                dfh.write(cdelta)
                ifh.write(struct.pack(indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            start = self.start(self.base(t))
            end = self.end(t)

        dfh.close()
        ifh.close()
        return node