Fix to handle case of empty list for roots or heads in nodesbetween.
Eric Hopper
r1463:26e73acc default
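Context for the change: nodesbetween previously assumed non-empty roots/heads and would call min() or max() on an empty sequence, raising ValueError. The patch introduces a shared empty result, nonodes = ([], [], []), and returns it early when either argument is an explicitly empty list. A minimal standalone sketch of that guard (illustrative names, not code from the patch):

    # Hedged sketch of the guard this commit adds to nodesbetween():
    # an explicitly empty roots/heads list means nothing can qualify.
    nonodes = ([], [], [])  # (orderedout, roots, heads)

    def empty_args_result(roots, heads):
        """Return nonodes for an empty roots/heads list, else None."""
        if roots is not None and not list(roots):
            return nonodes
        if heads is not None and not list(heads):
            return nonodes
        return None  # proceed with the full computation

Passing None keeps its old meaning of "use the defaults" (nullid as the only root, all repository heads); only a genuinely empty list takes the early return.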
@@ -1,816 +1,822 @@
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import *
13 from node import *
14 from demandload import demandload
14 from demandload import demandload
15 demandload(globals(), "binascii errno heapq mdiff sha struct zlib")
15 demandload(globals(), "binascii errno heapq mdiff sha struct zlib")
16
16
17 def hash(text, p1, p2):
17 def hash(text, p1, p2):
18 """generate a hash from the given text and its parent hashes
18 """generate a hash from the given text and its parent hashes
19
19
20 This hash combines both the current file contents and its history
20 This hash combines both the current file contents and its history
21 in a manner that makes it easy to distinguish nodes with the same
21 in a manner that makes it easy to distinguish nodes with the same
22 content in the revision graph.
22 content in the revision graph.
23 """
23 """
24 l = [p1, p2]
24 l = [p1, p2]
25 l.sort()
25 l.sort()
26 s = sha.new(l[0])
26 s = sha.new(l[0])
27 s.update(l[1])
27 s.update(l[1])
28 s.update(text)
28 s.update(text)
29 return s.digest()
29 return s.digest()
30
30
31 def compress(text):
31 def compress(text):
32 """ generate a possibly-compressed representation of text """
32 """ generate a possibly-compressed representation of text """
33 if not text: return text
33 if not text: return text
34 if len(text) < 44:
34 if len(text) < 44:
35 if text[0] == '\0': return text
35 if text[0] == '\0': return text
36 return 'u' + text
36 return 'u' + text
37 bin = zlib.compress(text)
37 bin = zlib.compress(text)
38 if len(bin) > len(text):
38 if len(bin) > len(text):
39 if text[0] == '\0': return text
39 if text[0] == '\0': return text
40 return 'u' + text
40 return 'u' + text
41 return bin
41 return bin
42
42
43 def decompress(bin):
43 def decompress(bin):
44 """ decompress the given input """
44 """ decompress the given input """
45 if not bin: return bin
45 if not bin: return bin
46 t = bin[0]
46 t = bin[0]
47 if t == '\0': return bin
47 if t == '\0': return bin
48 if t == 'x': return zlib.decompress(bin)
48 if t == 'x': return zlib.decompress(bin)
49 if t == 'u': return bin[1:]
49 if t == 'u': return bin[1:]
50 raise RevlogError("unknown compression type %s" % t)
50 raise RevlogError("unknown compression type %s" % t)
51
51
52 indexformat = ">4l20s20s20s"
52 indexformat = ">4l20s20s20s"
53
53
54 class lazyparser:
54 class lazyparser:
55 """
55 """
56 this class avoids the need to parse the entirety of large indices
56 this class avoids the need to parse the entirety of large indices
57
57
58 By default we parse and load 1000 entries at a time.
58 By default we parse and load 1000 entries at a time.
59
59
60 If no position is specified, we load the whole index, and replace
60 If no position is specified, we load the whole index, and replace
61 the lazy objects in revlog with the underlying objects for
61 the lazy objects in revlog with the underlying objects for
62 efficiency in cases where we look at most of the nodes.
62 efficiency in cases where we look at most of the nodes.
63 """
63 """
64 def __init__(self, data, revlog):
64 def __init__(self, data, revlog):
65 self.data = data
65 self.data = data
66 self.s = struct.calcsize(indexformat)
66 self.s = struct.calcsize(indexformat)
67 self.l = len(data)/self.s
67 self.l = len(data)/self.s
68 self.index = [None] * self.l
68 self.index = [None] * self.l
69 self.map = {nullid: -1}
69 self.map = {nullid: -1}
70 self.all = 0
70 self.all = 0
71 self.revlog = revlog
71 self.revlog = revlog
72
72
73 def load(self, pos=None):
73 def load(self, pos=None):
74 if self.all: return
74 if self.all: return
75 if pos is not None:
75 if pos is not None:
76 block = pos / 1000
76 block = pos / 1000
77 i = block * 1000
77 i = block * 1000
78 end = min(self.l, i + 1000)
78 end = min(self.l, i + 1000)
79 else:
79 else:
80 self.all = 1
80 self.all = 1
81 i = 0
81 i = 0
82 end = self.l
82 end = self.l
83 self.revlog.index = self.index
83 self.revlog.index = self.index
84 self.revlog.nodemap = self.map
84 self.revlog.nodemap = self.map
85
85
86 while i < end:
86 while i < end:
87 d = self.data[i * self.s: (i + 1) * self.s]
87 d = self.data[i * self.s: (i + 1) * self.s]
88 e = struct.unpack(indexformat, d)
88 e = struct.unpack(indexformat, d)
89 self.index[i] = e
89 self.index[i] = e
90 self.map[e[6]] = i
90 self.map[e[6]] = i
91 i += 1
91 i += 1
92
92
93 class lazyindex:
93 class lazyindex:
94 """a lazy version of the index array"""
94 """a lazy version of the index array"""
95 def __init__(self, parser):
95 def __init__(self, parser):
96 self.p = parser
96 self.p = parser
97 def __len__(self):
97 def __len__(self):
98 return len(self.p.index)
98 return len(self.p.index)
99 def load(self, pos):
99 def load(self, pos):
100 self.p.load(pos)
100 self.p.load(pos)
101 return self.p.index[pos]
101 return self.p.index[pos]
102 def __getitem__(self, pos):
102 def __getitem__(self, pos):
103 return self.p.index[pos] or self.load(pos)
103 return self.p.index[pos] or self.load(pos)
104 def append(self, e):
104 def append(self, e):
105 self.p.index.append(e)
105 self.p.index.append(e)
106
106
107 class lazymap:
107 class lazymap:
108 """a lazy version of the node map"""
108 """a lazy version of the node map"""
109 def __init__(self, parser):
109 def __init__(self, parser):
110 self.p = parser
110 self.p = parser
111 def load(self, key):
111 def load(self, key):
112 if self.p.all: return
112 if self.p.all: return
113 n = self.p.data.find(key)
113 n = self.p.data.find(key)
114 if n < 0:
114 if n < 0:
115 raise KeyError(key)
115 raise KeyError(key)
116 pos = n / self.p.s
116 pos = n / self.p.s
117 self.p.load(pos)
117 self.p.load(pos)
118 def __contains__(self, key):
118 def __contains__(self, key):
119 self.p.load()
119 self.p.load()
120 return key in self.p.map
120 return key in self.p.map
121 def __iter__(self):
121 def __iter__(self):
122 yield nullid
122 yield nullid
123 for i in xrange(self.p.l):
123 for i in xrange(self.p.l):
124 try:
124 try:
125 yield self.p.index[i][6]
125 yield self.p.index[i][6]
126 except:
126 except:
127 self.p.load(i)
127 self.p.load(i)
128 yield self.p.index[i][6]
128 yield self.p.index[i][6]
129 def __getitem__(self, key):
129 def __getitem__(self, key):
130 try:
130 try:
131 return self.p.map[key]
131 return self.p.map[key]
132 except KeyError:
132 except KeyError:
133 try:
133 try:
134 self.load(key)
134 self.load(key)
135 return self.p.map[key]
135 return self.p.map[key]
136 except KeyError:
136 except KeyError:
137 raise KeyError("node " + hex(key))
137 raise KeyError("node " + hex(key))
138 def __setitem__(self, key, val):
138 def __setitem__(self, key, val):
139 self.p.map[key] = val
139 self.p.map[key] = val
140
140
141 class RevlogError(Exception): pass
141 class RevlogError(Exception): pass
142
142
143 class revlog:
143 class revlog:
144 """
144 """
145 the underlying revision storage object
145 the underlying revision storage object
146
146
147 A revlog consists of two parts, an index and the revision data.
147 A revlog consists of two parts, an index and the revision data.
148
148
149 The index is a file with a fixed record size containing
149 The index is a file with a fixed record size containing
150 information on each revision, includings its nodeid (hash), the
150 information on each revision, includings its nodeid (hash), the
151 nodeids of its parents, the position and offset of its data within
151 nodeids of its parents, the position and offset of its data within
152 the data file, and the revision it's based on. Finally, each entry
152 the data file, and the revision it's based on. Finally, each entry
153 contains a linkrev entry that can serve as a pointer to external
153 contains a linkrev entry that can serve as a pointer to external
154 data.
154 data.
155
155
156 The revision data itself is a linear collection of data chunks.
156 The revision data itself is a linear collection of data chunks.
157 Each chunk represents a revision and is usually represented as a
157 Each chunk represents a revision and is usually represented as a
158 delta against the previous chunk. To bound lookup time, runs of
158 delta against the previous chunk. To bound lookup time, runs of
159 deltas are limited to about 2 times the length of the original
159 deltas are limited to about 2 times the length of the original
160 version data. This makes retrieval of a version proportional to
160 version data. This makes retrieval of a version proportional to
161 its size, or O(1) relative to the number of revisions.
161 its size, or O(1) relative to the number of revisions.
162
162
163 Both pieces of the revlog are written to in an append-only
163 Both pieces of the revlog are written to in an append-only
164 fashion, which means we never need to rewrite a file to insert or
164 fashion, which means we never need to rewrite a file to insert or
165 remove data, and can use some simple techniques to avoid the need
165 remove data, and can use some simple techniques to avoid the need
166 for locking while reading.
166 for locking while reading.
167 """
167 """
168 def __init__(self, opener, indexfile, datafile):
168 def __init__(self, opener, indexfile, datafile):
169 """
169 """
170 create a revlog object
170 create a revlog object
171
171
172 opener is a function that abstracts the file opening operation
172 opener is a function that abstracts the file opening operation
173 and can be used to implement COW semantics or the like.
173 and can be used to implement COW semantics or the like.
174 """
174 """
175 self.indexfile = indexfile
175 self.indexfile = indexfile
176 self.datafile = datafile
176 self.datafile = datafile
177 self.opener = opener
177 self.opener = opener
178 self.cache = None
178 self.cache = None
179
179
180 try:
180 try:
181 i = self.opener(self.indexfile).read()
181 i = self.opener(self.indexfile).read()
182 except IOError, inst:
182 except IOError, inst:
183 if inst.errno != errno.ENOENT:
183 if inst.errno != errno.ENOENT:
184 raise
184 raise
185 i = ""
185 i = ""
186
186
187 if len(i) > 10000:
187 if len(i) > 10000:
188 # big index, let's parse it on demand
188 # big index, let's parse it on demand
189 parser = lazyparser(i, self)
189 parser = lazyparser(i, self)
190 self.index = lazyindex(parser)
190 self.index = lazyindex(parser)
191 self.nodemap = lazymap(parser)
191 self.nodemap = lazymap(parser)
192 else:
192 else:
193 s = struct.calcsize(indexformat)
193 s = struct.calcsize(indexformat)
194 l = len(i) / s
194 l = len(i) / s
195 self.index = [None] * l
195 self.index = [None] * l
196 m = [None] * l
196 m = [None] * l
197
197
198 n = 0
198 n = 0
199 for f in xrange(0, len(i), s):
199 for f in xrange(0, len(i), s):
200 # offset, size, base, linkrev, p1, p2, nodeid
200 # offset, size, base, linkrev, p1, p2, nodeid
201 e = struct.unpack(indexformat, i[f:f + s])
201 e = struct.unpack(indexformat, i[f:f + s])
202 m[n] = (e[6], n)
202 m[n] = (e[6], n)
203 self.index[n] = e
203 self.index[n] = e
204 n += 1
204 n += 1
205
205
206 self.nodemap = dict(m)
206 self.nodemap = dict(m)
207 self.nodemap[nullid] = -1
207 self.nodemap[nullid] = -1
208
208
209 def tip(self): return self.node(len(self.index) - 1)
209 def tip(self): return self.node(len(self.index) - 1)
210 def count(self): return len(self.index)
210 def count(self): return len(self.index)
211 def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
211 def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
212 def rev(self, node):
212 def rev(self, node):
213 try:
213 try:
214 return self.nodemap[node]
214 return self.nodemap[node]
215 except KeyError:
215 except KeyError:
216 raise RevlogError('%s: no node %s' % (self.indexfile, hex(node)))
216 raise RevlogError('%s: no node %s' % (self.indexfile, hex(node)))
217 def linkrev(self, node): return self.index[self.rev(node)][3]
217 def linkrev(self, node): return self.index[self.rev(node)][3]
218 def parents(self, node):
218 def parents(self, node):
219 if node == nullid: return (nullid, nullid)
219 if node == nullid: return (nullid, nullid)
220 return self.index[self.rev(node)][4:6]
220 return self.index[self.rev(node)][4:6]
221
221
222 def start(self, rev): return self.index[rev][0]
222 def start(self, rev): return self.index[rev][0]
223 def length(self, rev): return self.index[rev][1]
223 def length(self, rev): return self.index[rev][1]
224 def end(self, rev): return self.start(rev) + self.length(rev)
224 def end(self, rev): return self.start(rev) + self.length(rev)
225 def base(self, rev): return self.index[rev][2]
225 def base(self, rev): return self.index[rev][2]
226
226
227 def reachable(self, rev, stop=None):
227 def reachable(self, rev, stop=None):
228 reachable = {}
228 reachable = {}
229 visit = [rev]
229 visit = [rev]
230 reachable[rev] = 1
230 reachable[rev] = 1
231 if stop:
231 if stop:
232 stopn = self.rev(stop)
232 stopn = self.rev(stop)
233 else:
233 else:
234 stopn = 0
234 stopn = 0
235 while visit:
235 while visit:
236 n = visit.pop(0)
236 n = visit.pop(0)
237 if n == stop:
237 if n == stop:
238 continue
238 continue
239 if n == nullid:
239 if n == nullid:
240 continue
240 continue
241 for p in self.parents(n):
241 for p in self.parents(n):
242 if self.rev(p) < stopn:
242 if self.rev(p) < stopn:
243 continue
243 continue
244 if p not in reachable:
244 if p not in reachable:
245 reachable[p] = 1
245 reachable[p] = 1
246 visit.append(p)
246 visit.append(p)
247 return reachable
247 return reachable
248
248
249 def nodesbetween(self, roots=None, heads=None):
249 def nodesbetween(self, roots=None, heads=None):
250 """Return a tuple containing three elements. Elements 1 and 2 contain
250 """Return a tuple containing three elements. Elements 1 and 2 contain
251 a final list bases and heads after all the unreachable ones have been
251 a final list bases and heads after all the unreachable ones have been
252 pruned. Element 0 contains a topologically sorted list of all
252 pruned. Element 0 contains a topologically sorted list of all
253
253
254 nodes that satisfy these constraints:
254 nodes that satisfy these constraints:
255 1. All nodes must be descended from a node in roots (the nodes on
255 1. All nodes must be descended from a node in roots (the nodes on
256 roots are considered descended from themselves).
256 roots are considered descended from themselves).
257 2. All nodes must also be ancestors of a node in heads (the nodes in
257 2. All nodes must also be ancestors of a node in heads (the nodes in
258 heads are considered to be their own ancestors).
258 heads are considered to be their own ancestors).
259
259
260 If roots is unspecified, nullid is assumed as the only root.
260 If roots is unspecified, nullid is assumed as the only root.
261 If heads is unspecified, it is taken to be the output of the
261 If heads is unspecified, it is taken to be the output of the
262 heads method (i.e. a list of all nodes in the repository that
262 heads method (i.e. a list of all nodes in the repository that
263 have no children)."""
263 have no children)."""
+        nonodes = ([], [], [])
         if roots is not None:
             roots = list(roots)
+            if not roots:
+                return nonodes
             lowestrev = min([self.rev(n) for n in roots])
         else:
             roots = [nullid] # Everybody's a descendent of nullid
             lowestrev = -1
         if (lowestrev == -1) and (heads is None):
             # We want _all_ the nodes!
             return ([self.node(r) for r in xrange(0, self.count())],
                     [nullid], list(self.heads()))
         if heads is None:
             # All nodes are ancestors, so the latest ancestor is the last
             # node.
             highestrev = self.count() - 1
             # Set ancestors to None to signal that every node is an ancestor.
             ancestors = None
             # Set heads to an empty dictionary for later discovery of heads
             heads = {}
         else:
+            heads = list(heads)
+            if not heads:
+                return nonodes
             ancestors = {}
             # Start at the top and keep marking parents until we're done.
-            nodestotag = list(heads)
+            nodestotag = heads[:]
             # Turn heads into a dictionary so we can remove 'fake' heads.
             # Also, later we will be using it to filter out the heads we can't
             # find from roots.
             heads = dict.fromkeys(heads, 0)
             # Remember where the top was so we can use it as a limit later.
             highestrev = max([self.rev(n) for n in nodestotag])
             while nodestotag:
                 # grab a node to tag
                 n = nodestotag.pop()
                 # Never tag nullid
                 if n == nullid:
                     continue
                 # A node's revision number represents its place in a
                 # topologically sorted list of nodes.
                 r = self.rev(n)
                 if r >= lowestrev:
                     if n not in ancestors:
                         # If we are possibly a descendent of one of the roots
                         # and we haven't already been marked as an ancestor
                         ancestors[n] = 1 # Mark as ancestor
                         # Add non-nullid parents to list of nodes to tag.
                         nodestotag.extend([p for p in self.parents(n) if
                                            p != nullid])
                     elif n in heads: # We've seen it before, is it a fake head?
                         # So it is, real heads should not be the ancestors of
                         # any other heads.
                         heads.pop(n)
             if not ancestors:
-                return ([], [], [])
+                return nonodes
             # Now that we have our set of ancestors, we want to remove any
             # roots that are not ancestors.

             # If one of the roots was nullid, everything is included anyway.
             if lowestrev > -1:
                 # But, since we weren't, let's recompute the lowest rev to not
                 # include roots that aren't ancestors.

                 # Filter out roots that aren't ancestors of heads
                 roots = [n for n in roots if n in ancestors]
                 # Recompute the lowest revision
                 if roots:
                     lowestrev = min([self.rev(n) for n in roots])
                 else:
                     # No more roots?  Return empty list
-                    return ([], [], [])
+                    return nonodes
             else:
                 # We are descending from nullid, and don't need to care about
                 # any other roots.
                 lowestrev = -1
                 roots = [nullid]
         # Transform our roots list into a 'set' (i.e. a dictionary where the
         # values don't matter).
         descendents = dict.fromkeys(roots, 1)
         # Also, keep the original roots so we can filter out roots that aren't
         # 'real' roots (i.e. are descended from other roots).
         roots = descendents.copy()
         # Our topologically sorted list of output nodes.
         orderedout = []
         # Don't start at nullid since we don't want nullid in our output list,
         # and if nullid shows up in descendents, empty parents will look like
         # they're descendents.
         for r in xrange(max(lowestrev, 0), highestrev + 1):
             n = self.node(r)
             isdescendent = False
             if lowestrev == -1: # Everybody is a descendent of nullid
                 isdescendent = True
             elif n in descendents:
                 # n is already a descendent
                 isdescendent = True
                 # This check only needs to be done here because all the roots
                 # will start being marked as descendents before the loop.
                 if n in roots:
                     # If n was a root, check if it's a 'real' root.
                     p = tuple(self.parents(n))
                     # If any of its parents are descendents, it's not a root.
                     if (p[0] in descendents) or (p[1] in descendents):
                         roots.pop(n)
             else:
                 p = tuple(self.parents(n))
                 # A node is a descendent if either of its parents are
                 # descendents.  (We seeded the descendents list with the roots
                 # up there, remember?)
                 if (p[0] in descendents) or (p[1] in descendents):
                     descendents[n] = 1
                     isdescendent = True
             if isdescendent and ((ancestors is None) or (n in ancestors)):
                 # Only include nodes that are both descendents and ancestors.
                 orderedout.append(n)
                 if (ancestors is not None) and (n in heads):
                     # We're trying to figure out which heads are reachable
                     # from roots.
                     # Mark this head as having been reached
                     heads[n] = 1
                 elif ancestors is None:
                     # Otherwise, we're trying to discover the heads.
                     # Assume this is a head because if it isn't, the next step
                     # will eventually remove it.
                     heads[n] = 1
                     # But, obviously its parents aren't.
                     for p in self.parents(n):
                         heads.pop(p, None)
         heads = [n for n in heads.iterkeys() if heads[n] != 0]
         roots = roots.keys()
         assert orderedout
         assert roots
         assert heads
         return (orderedout, roots, heads)

     def heads(self, stop=None):
         """return the list of all nodes that have no children"""
         p = {}
         h = []
         stoprev = 0
         if stop and stop in self.nodemap:
             stoprev = self.rev(stop)

         for r in range(self.count() - 1, -1, -1):
             n = self.node(r)
             if n not in p:
                 h.append(n)
             if n == stop:
                 break
             if r < stoprev:
                 break
             for pn in self.parents(n):
                 p[pn] = 1
         return h

     def children(self, node):
         """find the children of a given node"""
         c = []
         p = self.rev(node)
         for r in range(p + 1, self.count()):
             n = self.node(r)
             for pn in self.parents(n):
                 if pn == node:
                     c.append(n)
                     continue
                 elif pn == nullid:
                     continue
         return c

     def lookup(self, id):
         """locate a node based on revision number or subset of hex nodeid"""
         try:
             rev = int(id)
             if str(rev) != id: raise ValueError
             if rev < 0: rev = self.count() + rev
             if rev < 0 or rev >= self.count(): raise ValueError
             return self.node(rev)
         except (ValueError, OverflowError):
             c = []
             for n in self.nodemap:
                 if hex(n).startswith(id):
                     c.append(n)
             if len(c) > 1: raise KeyError("Ambiguous identifier")
             if len(c) < 1: raise KeyError("No match found")
             return c[0]

         return None

     def diff(self, a, b):
         """return a delta between two revisions"""
         return mdiff.textdiff(a, b)

     def patches(self, t, pl):
         """apply a list of patches to a string"""
         return mdiff.patches(t, pl)

     def delta(self, node):
         """return or calculate a delta between a node and its predecessor"""
         r = self.rev(node)
         b = self.base(r)
         if r == b:
             return self.diff(self.revision(self.node(r - 1)),
                              self.revision(node))
         else:
             f = self.opener(self.datafile)
             f.seek(self.start(r))
             data = f.read(self.length(r))
             return decompress(data)

     def revision(self, node):
         """return an uncompressed revision of a given node"""
         if node == nullid: return ""
         if self.cache and self.cache[0] == node: return self.cache[2]

         # look up what we need to read
         text = None
         rev = self.rev(node)
         start, length, base, link, p1, p2, node = self.index[rev]
         end = start + length
         if base != rev: start = self.start(base)

         # do we have useful data cached?
         if self.cache and self.cache[1] >= base and self.cache[1] < rev:
             base = self.cache[1]
             start = self.start(base + 1)
             text = self.cache[2]
             last = 0

         f = self.opener(self.datafile)
         f.seek(start)
         data = f.read(end - start)

         if text is None:
             last = self.length(base)
             text = decompress(data[:last])

         bins = []
         for r in xrange(base + 1, rev + 1):
             s = self.length(r)
             bins.append(decompress(data[last:last + s]))
             last = last + s

         text = mdiff.patches(text, bins)

         if node != hash(text, p1, p2):
             raise RevlogError("integrity check failed on %s:%d"
                               % (self.datafile, rev))

         self.cache = (node, rev, text)
         return text

     def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
         """add a revision to the log

         text - the revision data to add
         transaction - the transaction object used for rollback
         link - the linkrev data to add
         p1, p2 - the parent nodeids of the revision
         d - an optional precomputed delta
         """
         if text is None: text = ""
         if p1 is None: p1 = self.tip()
         if p2 is None: p2 = nullid

         node = hash(text, p1, p2)

         if node in self.nodemap:
             return node

         n = self.count()
         t = n - 1

         if n:
             base = self.base(t)
             start = self.start(base)
             end = self.end(t)
             if not d:
                 prev = self.revision(self.tip())
                 d = self.diff(prev, text)
             data = compress(d)
             dist = end - start + len(data)

         # full versions are inserted when the needed deltas
         # become comparable to the uncompressed text
         if not n or dist > len(text) * 2:
             data = compress(text)
             base = n
         else:
             base = self.base(t)

         offset = 0
         if t >= 0:
             offset = self.end(t)

         e = (offset, len(data), base, link, p1, p2, node)

         self.index.append(e)
         self.nodemap[node] = n
         entry = struct.pack(indexformat, *e)

         transaction.add(self.datafile, e[0])
         self.opener(self.datafile, "a").write(data)
         transaction.add(self.indexfile, n * len(entry))
         self.opener(self.indexfile, "a").write(entry)

         self.cache = (node, n, text)
         return node

     def ancestor(self, a, b):
         """calculate the least common ancestor of nodes a and b"""
         # calculate the distance of every node from root
         dist = {nullid: 0}
         for i in xrange(self.count()):
             n = self.node(i)
             p1, p2 = self.parents(n)
             dist[n] = max(dist[p1], dist[p2]) + 1

         # traverse ancestors in order of decreasing distance from root
         def ancestors(node):
             # we store negative distances because heap returns smallest member
             h = [(-dist[node], node)]
             seen = {}
             earliest = self.count()
             while h:
                 d, n = heapq.heappop(h)
                 if n not in seen:
                     seen[n] = 1
                     r = self.rev(n)
                     yield (-d, n)
                     for p in self.parents(n):
                         heapq.heappush(h, (-dist[p], p))

         def generations(node):
             sg, s = None, {}
             for g, n in ancestors(node):
                 if g != sg:
                     if sg:
                         yield sg, s
                     sg, s = g, {n:1}
                 else:
                     s[n] = 1
             yield sg, s

         x = generations(a)
         y = generations(b)
         gx = x.next()
         gy = y.next()

         # increment each ancestor list until it is closer to root than
         # the other, or they match
         while 1:
             #print "ancestor gen %s %s" % (gx[0], gy[0])
             if gx[0] == gy[0]:
                 # find the intersection
                 i = [ n for n in gx[1] if n in gy[1] ]
                 if i:
                     return i[0]
                 else:
                     #print "next"
                     gy = y.next()
                     gx = x.next()
             elif gx[0] < gy[0]:
                 #print "next y"
                 gy = y.next()
             else:
                 #print "next x"
                 gx = x.next()

     def group(self, nodelist, lookup, infocollect = None):
         """calculate a delta group

         Given a list of changeset revs, return a set of deltas and
         metadata corresponding to nodes.  The first delta is
         parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
         have this parent as it has all history before these
         changesets.  parent is parent[0].
         """
         revs = [self.rev(n) for n in nodelist]
         needed = dict.fromkeys(revs, 1)

         # if we don't have any revisions touched by these changesets, bail
         if not revs:
             yield struct.pack(">l", 0)
             return

         # add the parent of the first rev
         p = self.parents(self.node(revs[0]))[0]
         revs.insert(0, self.rev(p))

         # for each delta that isn't contiguous in the log, we need to
         # reconstruct the base, reconstruct the result, and then
         # calculate the delta. We also need to do this where we've
         # stored a full version and not a delta
         for i in xrange(0, len(revs) - 1):
             a, b = revs[i], revs[i + 1]
             if a + 1 != b or self.base(b) == b:
                 for j in xrange(self.base(a), a + 1):
                     needed[j] = 1
                 for j in xrange(self.base(b), b + 1):
                     needed[j] = 1

         # calculate spans to retrieve from datafile
         needed = needed.keys()
         needed.sort()
         spans = []
         oo = -1
         ol = 0
         for n in needed:
             if n < 0: continue
             o = self.start(n)
             l = self.length(n)
             if oo + ol == o: # can we merge with the previous?
                 nl = spans[-1][2]
                 nl.append((n, l))
                 ol += l
                 spans[-1] = (oo, ol, nl)
             else:
                 oo = o
                 ol = l
                 spans.append((oo, ol, [(n, l)]))

         # read spans in, divide up chunks
         chunks = {}
         for span in spans:
             # we reopen the file for each span to make http happy for now
             f = self.opener(self.datafile)
             f.seek(span[0])
             data = f.read(span[1])

             # divide up the span
             pos = 0
             for r, l in span[2]:
                 chunks[r] = decompress(data[pos: pos + l])
                 pos += l

         # helper to reconstruct intermediate versions
         def construct(text, base, rev):
             bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
             return mdiff.patches(text, bins)

         # build deltas
         deltas = []
         for d in xrange(0, len(revs) - 1):
             a, b = revs[d], revs[d + 1]
             n = self.node(b)

             if infocollect is not None:
                 infocollect(n)

             # do we need to construct a new delta?
             if a + 1 != b or self.base(b) == b:
                 if a >= 0:
                     base = self.base(a)
                     ta = chunks[self.base(a)]
                     ta = construct(ta, base, a)
                 else:
                     ta = ""

                 base = self.base(b)
                 if a > base:
                     base = a
                     tb = ta
                 else:
                     tb = chunks[self.base(b)]
                 tb = construct(tb, base, b)
                 d = self.diff(ta, tb)
             else:
                 d = chunks[b]

             p = self.parents(n)
             meta = n + p[0] + p[1] + lookup(n)
             l = struct.pack(">l", len(meta) + len(d) + 4)
             yield l
             yield meta
             yield d

         yield struct.pack(">l", 0)

     def addgroup(self, revs, linkmapper, transaction, unique=0):
         """
         add a delta group

         given a set of deltas, add them to the revision log. the
         first delta is against its parent, which should be in our
         log, the rest are against the previous delta.
         """

         # track the base of the current delta log
         r = self.count()
         t = r - 1
         node = nullid

         base = prev = -1
         start = end = measure = 0
         if r:
             start = self.start(self.base(t))
             end = self.end(t)
             measure = self.length(self.base(t))
             base = self.base(t)
             prev = self.tip()

         transaction.add(self.datafile, end)
         transaction.add(self.indexfile, r * struct.calcsize(indexformat))
         dfh = self.opener(self.datafile, "a")
         ifh = self.opener(self.indexfile, "a")

         # loop through our set of deltas
         chain = None
         for chunk in revs:
             node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
             link = linkmapper(cs)
             if node in self.nodemap:
                 # this can happen if two branches make the same change
                 # if unique:
                 #     raise RevlogError("already have %s" % hex(node[:4]))
                 chain = node
                 continue
             delta = chunk[80:]

             if not chain:
                 # retrieve the parent revision of the delta chain
                 chain = p1
                 if not chain in self.nodemap:
                     raise RevlogError("unknown base %s" % short(chain[:4]))

             # full versions are inserted when the needed deltas become
             # comparable to the uncompressed text or when the previous
             # version is not the one we have a delta against. We use
             # the size of the previous full rev as a proxy for the
             # current size.

             if chain == prev:
                 cdelta = compress(delta)

             if chain != prev or (end - start + len(cdelta)) > measure * 2:
                 # flush our writes here so we can read it in revision
                 dfh.flush()
                 ifh.flush()
                 text = self.revision(chain)
                 text = self.patches(text, [delta])
                 chk = self.addrevision(text, transaction, link, p1, p2)
                 if chk != node:
                     raise RevlogError("consistency error adding group")
                 measure = len(text)
             else:
                 e = (end, len(cdelta), self.base(t), link, p1, p2, node)
                 self.index.append(e)
                 self.nodemap[node] = r
                 dfh.write(cdelta)
                 ifh.write(struct.pack(indexformat, *e))

             t, r, chain, prev = r, r + 1, node, node
             start = self.start(self.base(t))
             end = self.end(t)

         dfh.close()
         ifh.close()
         return node
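A note for readers of the index-parsing code above: each index record is a fixed-size struct, ">4l20s20s20s", i.e. four big-endian 32-bit integers (offset, size, base, linkrev) followed by three 20-byte binary hashes (p1, p2, nodeid), 76 bytes per record. A small decoder sketch (the helper name is mine, not from the source):

    import struct

    indexformat = ">4l20s20s20s"            # as defined in revlog.py
    recsize = struct.calcsize(indexformat)  # 4*4 + 3*20 = 76 bytes

    def parse_index_entry(data, n):
        """Decode record n of a raw index buffer into its seven fields."""
        e = struct.unpack(indexformat, data[n * recsize:(n + 1) * recsize])
        offset, size, base, linkrev, p1, p2, nodeid = e
        return offset, size, base, linkrev, p1, p2, nodeid

This is the same slice-and-unpack that lazyparser.load performs per entry, and the field order matches the "# offset, size, base, linkrev, p1, p2, nodeid" comment in revlog.__init__.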