Calling revlog.addgroup with an empty changegroup now raises RevlogError....
Thomas Arendsen Hein
r2002:4aab9065 default
@@ -1,881 +1,883 @@
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
7 Copyright 2005 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 from node import *
13 from node import *
14 from i18n import gettext as _
14 from i18n import gettext as _
15 from demandload import demandload
15 from demandload import demandload
16 demandload(globals(), "binascii changegroup errno heapq mdiff os")
16 demandload(globals(), "binascii changegroup errno heapq mdiff os")
17 demandload(globals(), "sha struct zlib")
17 demandload(globals(), "sha struct zlib")
18
18
19 def hash(text, p1, p2):
19 def hash(text, p1, p2):
20 """generate a hash from the given text and its parent hashes
20 """generate a hash from the given text and its parent hashes
21
21
22 This hash combines both the current file contents and its history
22 This hash combines both the current file contents and its history
23 in a manner that makes it easy to distinguish nodes with the same
23 in a manner that makes it easy to distinguish nodes with the same
24 content in the revision graph.
24 content in the revision graph.
25 """
25 """
26 l = [p1, p2]
26 l = [p1, p2]
27 l.sort()
27 l.sort()
28 s = sha.new(l[0])
28 s = sha.new(l[0])
29 s.update(l[1])
29 s.update(l[1])
30 s.update(text)
30 s.update(text)
31 return s.digest()
31 return s.digest()
32
32
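# Editor's example (a sketch, not part of revlog.py): hashing the first
# revision of a file, whose parents are both nullid (twenty zero bytes from
# the node module). Because the parents are sorted first, the nodeid does
# not depend on parent order:
#
#   n = hash("hello\n", nullid, nullid)   # 20-byte binary nodeid
#   hex(n)                                # the familiar 40-char hex form
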
def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text: return ("", text)
    if len(text) < 44:
        if text[0] == '\0': return ("", text)
        return ('u', text)
    bin = zlib.compress(text)
    if len(bin) > len(text):
        if text[0] == '\0': return ("", text)
        return ('u', text)
    return ("", bin)

def decompress(bin):
    """ decompress the given input """
    if not bin: return bin
    t = bin[0]
    if t == '\0': return bin
    if t == 'x': return zlib.decompress(bin)
    if t == 'u': return bin[1:]
    raise RevlogError(_("unknown compression type %r") % t)

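# Editor's example (a sketch, not part of revlog.py): the pair above
# round-trips any string. Short or incompressible text is stored literally
# behind a 'u' marker; zlib output begins with 'x', which is how decompress
# tells the encodings apart:
#
#   decompress("".join(compress("a" * 1000))) == "a" * 1000   # zlib path
#   decompress("".join(compress("tiny")))     == "tiny"       # 'u' path
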
indexformat = ">4l20s20s20s"

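# Editor's note (example, not part of the original file): each index record
# is 76 bytes, four big-endian longs (offset, size, base, linkrev) followed
# by three 20-byte fields (p1, p2, nodeid). Decoding one entry is just:
#
#   s = struct.calcsize(indexformat)   # 76
#   offset, size, base, linkrev, p1, p2, node = \
#       struct.unpack(indexformat, data[0:s])
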
class lazyparser(object):
    """
    this class avoids the need to parse the entirety of large indices

    By default we parse and load 1000 entries at a time.

    If no position is specified, we load the whole index, and replace
    the lazy objects in revlog with the underlying objects for
    efficiency in cases where we look at most of the nodes.
    """
    def __init__(self, data, revlog):
        self.data = data
        self.s = struct.calcsize(indexformat)
        self.l = len(data)/self.s
        self.index = [None] * self.l
        self.map = {nullid: -1}
        self.all = 0
        self.revlog = revlog

    def trunc(self, pos):
        self.l = pos/self.s

    def load(self, pos=None):
        if self.all: return
        if pos is not None:
            block = pos / 1000
            i = block * 1000
            end = min(self.l, i + 1000)
        else:
            self.all = 1
            i = 0
            end = self.l
            self.revlog.index = self.index
            self.revlog.nodemap = self.map

        while i < end:
            d = self.data[i * self.s: (i + 1) * self.s]
            e = struct.unpack(indexformat, d)
            self.index[i] = e
            self.map[e[6]] = i
            i += 1

class lazyindex(object):
    """a lazy version of the index array"""
    def __init__(self, parser):
        self.p = parser
    def __len__(self):
        return len(self.p.index)
    def load(self, pos):
        if pos < 0:
            pos += len(self.p.index)
        self.p.load(pos)
        return self.p.index[pos]
    def __getitem__(self, pos):
        return self.p.index[pos] or self.load(pos)
    def __delitem__(self, pos):
        del self.p.index[pos]
    def append(self, e):
        self.p.index.append(e)
    def trunc(self, pos):
        self.p.trunc(pos)

class lazymap(object):
    """a lazy version of the node map"""
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        if self.p.all: return
        n = self.p.data.find(key)
        if n < 0:
            raise KeyError(key)
        pos = n / self.p.s
        self.p.load(pos)
    def __contains__(self, key):
        self.p.load()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            try:
                yield self.p.index[i][6]
            except:
                self.p.load(i)
                yield self.p.index[i][6]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
    def __delitem__(self, key):
        del self.p.map[key]

class RevlogError(Exception): pass

class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile, datafile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = datafile
        self.opener = opener

        self.indexstat = None
        self.cache = None
        self.chunkcache = None
        self.load()

    def load(self):
        try:
            f = self.opener(self.indexfile)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            i = ""
        else:
            try:
                st = os.fstat(f.fileno())
            except AttributeError, inst:
                st = None
            else:
                oldst = self.indexstat
                if (oldst and st.st_dev == oldst.st_dev
                    and st.st_ino == oldst.st_ino
                    and st.st_mtime == oldst.st_mtime
                    and st.st_ctime == oldst.st_ctime):
                    return
                self.indexstat = st
            i = f.read()

        if i and i[:4] != "\0\0\0\0":
            raise RevlogError(_("incompatible revlog signature on %s") %
                              self.indexfile)

        if len(i) > 10000:
            # big index, let's parse it on demand
            parser = lazyparser(i, self)
            self.index = lazyindex(parser)
            self.nodemap = lazymap(parser)
        else:
            s = struct.calcsize(indexformat)
            l = len(i) / s
            self.index = [None] * l
            m = [None] * l

            n = 0
            for f in xrange(0, l * s, s):
                # offset, size, base, linkrev, p1, p2, nodeid
                e = struct.unpack(indexformat, i[f:f + s])
                m[n] = (e[6], n)
                self.index[n] = e
                n += 1

            self.nodemap = dict(m)
            self.nodemap[nullid] = -1

    def tip(self): return self.node(len(self.index) - 1)
    def count(self): return len(self.index)
    def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
    def rev(self, node):
        try:
            return self.nodemap[node]
        except KeyError:
            raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
    def linkrev(self, node): return self.index[self.rev(node)][3]
    def parents(self, node):
        if node == nullid: return (nullid, nullid)
        return self.index[self.rev(node)][4:6]

    def start(self, rev): return (rev < 0) and -1 or self.index[rev][0]
    def length(self, rev):
        if rev < 0:
            return 0
        else:
            return self.index[rev][1]
    def end(self, rev): return self.start(rev) + self.length(rev)
    def base(self, rev): return (rev < 0) and rev or self.index[rev][2]

    def reachable(self, rev, stop=None):
        reachable = {}
        visit = [rev]
        reachable[rev] = 1
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable[p] = 1
                    visit.append(p)
        return reachable

    def nodesbetween(self, roots=None, heads=None):
        """Return a tuple containing three elements. Elements 1 and 2 contain
        a final list of bases and heads after all the unreachable ones have
        been pruned. Element 0 contains a topologically sorted list of all
        nodes that satisfy these constraints:
        1. All nodes must be descended from a node in roots (the nodes on
           roots are considered descended from themselves).
        2. All nodes must also be ancestors of a node in heads (the nodes in
           heads are considered to be their own ancestors).

        If roots is unspecified, nullid is assumed as the only root.
        If heads is unspecified, it is taken to be the output of the
        heads method (i.e. a list of all nodes in the repository that
        have no children)."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = -1
        if (lowestrev == -1) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in xrange(0, self.count())],
                    [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = self.count() - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = {}
            # Start at the top and keep marking parents until we're done.
            nodestotag = heads[:]
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, 0)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors[n] = 1 # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.extend([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > -1:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = -1
                roots = [nullid]
        # Transform our roots list into a 'set' (i.e. a dictionary where the
        # values don't matter).
        descendents = dict.fromkeys(roots, 1)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == -1: # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.pop(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents. (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents[n] = 1
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = roots.keys()
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

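    # Editor's example (not part of the original file): for a linear history
    # 0 -> 1 -> 2, nodesbetween([node(1)], [node(2)]) returns
    # ([node(1), node(2)], [node(1)], [node(2)]), i.e. the topological slice
    # plus the bases and heads that survived pruning. Called with no
    # arguments it returns every node in the revlog.
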
    def heads(self, start=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned

        """
        if start is None:
            start = nullid
        reachable = {start: 1}
        heads = {start: 1}
        startrev = self.rev(start)

        for r in xrange(startrev + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn in reachable:
                    reachable[n] = 1
                    heads[n] = 1
                if pn in heads:
                    del heads[pn]
        return heads.keys()

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn == node:
                    c.append(n)
                    continue
                elif pn == nullid:
                    continue
        return c

    def lookup(self, id):
        """locate a node based on revision number or subset of hex nodeid"""
        try:
            rev = int(id)
            if str(rev) != id: raise ValueError
            if rev < 0: rev = self.count() + rev
            if rev < 0 or rev >= self.count(): raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            c = []
            for n in self.nodemap:
                if hex(n).startswith(id):
                    c.append(n)
            if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
            if len(c) < 1: raise RevlogError(_("No match found"))
            return c[0]

        return None

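    # Editor's example (not part of the original file, identifiers are
    # illustrative): lookup accepts either form of identifier, e.g.
    #   r.lookup("0")        -> node of revision 0
    #   r.lookup("-1")       -> tip node (negative revs count from the end)
    #   r.lookup("4aab9065") -> the node whose hex form starts with this
    #                           prefix, or RevlogError if the prefix is
    #                           ambiguous or unknown
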
    def diff(self, a, b):
        """return a delta between two revisions"""
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        """apply a list of patches to a string"""
        return mdiff.patches(t, pl)

    def chunk(self, rev):
        start, length = self.start(rev), self.length(rev)
        end = start + length

        def loadcache():
            cache_length = max(4096 * 1024, length) # 4MB
            df = self.opener(self.datafile)
            df.seek(start)
            self.chunkcache = (start, df.read(cache_length))

        if not self.chunkcache:
            loadcache()

        cache_start = self.chunkcache[0]
        cache_end = cache_start + len(self.chunkcache[1])
        if start >= cache_start and end <= cache_end:
            # it is cached
            offset = start - cache_start
        else:
            loadcache()
            offset = 0

        #def checkchunk():
        #    df = self.opener(self.datafile)
        #    df.seek(start)
        #    return df.read(length)
        #assert s == checkchunk()
        return decompress(self.chunkcache[1][offset:offset + length])

    def delta(self, node):
        """return or calculate a delta between a node and its predecessor"""
        r = self.rev(node)
        return self.revdiff(r - 1, r)

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        b1 = self.base(rev1)
        b2 = self.base(rev2)
        if b1 == b2 and rev1 + 1 == rev2:
            return self.chunk(rev2)
        else:
            return self.diff(self.revision(self.node(rev1)),
                             self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        # do we have useful data cached?
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            text = self.cache[2]
        else:
            text = self.chunk(base)

        bins = []
        for r in xrange(base + 1, rev + 1):
            bins.append(self.chunk(r))

        text = self.patches(text, bins)

        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text

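    # Editor's note (example, not part of the original file): reconstruction
    # walks the delta chain. For a rev 7 whose base is rev 4, this reads
    # chunk(4) (a full text) and applies chunk(5), chunk(6), chunk(7) in a
    # single mdiff.patches call, then verifies the result against the stored
    # nodeid before caching it.
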
    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, str(text))
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = end - start + l

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        e = (offset, l, base, link, p1, p2, node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(indexformat, *e)

        transaction.add(self.datafile, e[0])
        f = self.opener(self.datafile, "a")
        if data[0]:
            f.write(data[0])
        f.write(data[1])
        transaction.add(self.indexfile, n * len(entry))
        self.opener(self.indexfile, "a").write(entry)

        self.cache = (node, n, text)
        return node

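    # Editor's example (not part of the original file): the
    # "dist > len(text) * 2" check above is what bounds delta chains. If the
    # chain since the last full version spans, say, 9000 bytes of stored
    # data and the new text is only 4000 bytes, 9000 > 8000 holds and a
    # fresh full version is stored instead of another delta, keeping
    # retrieval cost proportional to the revision's own size.
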
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""
        # calculate the distance of every node from root
        dist = {nullid: 0}
        for i in xrange(self.count()):
            n = self.node(i)
            p1, p2 = self.parents(n)
            dist[n] = max(dist[p1], dist[p2]) + 1

        # traverse ancestors in order of decreasing distance from root
        def ancestors(node):
            # we store negative distances because heap returns smallest member
            h = [(-dist[node], node)]
            seen = {}
            while h:
                d, n = heapq.heappop(h)
                if n not in seen:
                    seen[n] = 1
                    yield (-d, n)
                    for p in self.parents(n):
                        heapq.heappush(h, (-dist[p], p))

        def generations(node):
            sg, s = None, {}
            for g, n in ancestors(node):
                if g != sg:
                    if sg:
                        yield sg, s
                    sg, s = g, {n: 1}
                else:
                    s[n] = 1
            yield sg, s

        x = generations(a)
        y = generations(b)
        gx = x.next()
        gy = y.next()

        # increment each ancestor list until it is closer to root than
        # the other, or they match
        while 1:
            #print "ancestor gen %s %s" % (gx[0], gy[0])
            if gx[0] == gy[0]:
                # find the intersection
                i = [n for n in gx[1] if n in gy[1]]
                if i:
                    return i[0]
                else:
                    #print "next"
                    gy = y.next()
                    gx = x.next()
            elif gx[0] < gy[0]:
                #print "next y"
                gy = y.next()
            else:
                #print "next x"
                gx = x.next()

    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. parent is parent[0]
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            d = self.revdiff(a, b)
            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            yield changegroup.genchunk("%s%s" % (meta, d))

        yield changegroup.closechunk()

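    # Editor's note (example, not part of the original file): each chunk
    # yielded above is an 80-byte header followed by the delta:
    #   node (20s) + p1 (20s) + p2 (20s) + cs (20s) + delta
    # where cs is the changeset node the linkrev will point at. addgroup
    # below unpacks exactly this layout with "20s20s20s20s".
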
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        #track the base of the current delta log
        r = self.count()
        t = r - 1
-        node = nullid
+        node = None

        base = prev = -1
        start = end = measure = 0
        if r:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            measure = self.length(base)
            prev = self.tip()

        transaction.add(self.datafile, end)
        transaction.add(self.indexfile, r * struct.calcsize(indexformat))
        dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #    raise RevlogError(_("already have %s") % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            for p in (p1, p2):
                if not p in self.nodemap:
                    raise RevlogError(_("unknown parent %s") % short(p1))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise RevlogError(_("unknown base %s") % short(chain[:4]))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                tempd = compress(delta)
                cdelta = tempd[0] + tempd[1]

            if chain != prev or (end - start + len(cdelta)) > measure * 2:
                # flush our writes here so we can read it in revision
                dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                measure = len(text)
            else:
                e = (end, len(cdelta), base, link, p1, p2, node)
                self.index.append(e)
                self.nodemap[node] = r
                dfh.write(cdelta)
                ifh.write(struct.pack(indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)

        dfh.close()
        ifh.close()
+        if node is None:
+            raise RevlogError(_("group to be added is empty"))
        return node

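    # Editor's example (not part of the original file): with this change an
    # empty changegroup can no longer report success. A caller sketch:
    #
    #   try:
    #       tip = rl.addgroup(chunks, linkmapper, tr)
    #   except RevlogError:
    #       ...  # nothing was added; roll back the transaction
    #
    # Previously node started as nullid and was returned as if a revision
    # existed; now it starts as None and an empty group raises.
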
    def strip(self, rev, minlink):
        if self.count() == 0 or rev >= self.count():
            return

        # When stripping away a revision, we need to make sure it
        # does not actually belong to an older changeset.
        # The minlink parameter defines the oldest revision
        # we're allowed to strip away.
        while minlink > self.index[rev][3]:
            rev += 1
            if rev >= self.count():
                return

        # first truncate the files on disk
        end = self.start(rev)
        self.opener(self.datafile, "a").truncate(end)
        end = rev * struct.calcsize(indexformat)
        self.opener(self.indexfile, "a").truncate(end)

        # then reset internal state in memory to forget those revisions
        self.cache = None
        self.chunkcache = None
        for p in self.index[rev:]:
            del self.nodemap[p[6]]
        del self.index[rev:]

        # truncating the lazyindex also truncates the lazymap.
        if isinstance(self.index, lazyindex):
            self.index.trunc(end)

    def checksize(self):
        expected = 0
        if self.count():
            expected = self.end(self.count() - 1)

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = struct.calcsize(indexformat)
            i = actual / s
            di = actual - (i * s)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

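# Editor's usage sketch (not part of the original file), assuming a
# Mercurial tree of this era on the path; "opener" is any callable mapping
# a relative name to an open file, e.g.:
#
#   def opener(name, mode="r"):
#       return open(os.path.join(".hg", "data", name), mode)
#
#   r = revlog(opener, "somefile.i", "somefile.d")
#   print r.count(), hex(r.tip())   # number of revisions, tip nodeid
#   text = r.revision(r.tip())      # full text of the latest revision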