revlog: append delta against p1
Pradeepkumar Gayam
r11931:6051db13 default
@@ -1,1445 +1,1450 @@
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 # import stuff from node for others to import from revlog
14 # import stuff from node for others to import from revlog
15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
16 from i18n import _
16 from i18n import _
17 import changegroup, ancestor, mdiff, parsers, error, util
17 import changegroup, ancestor, mdiff, parsers, error, util
18 import struct, zlib, errno
18 import struct, zlib, errno
19
19
20 _pack = struct.pack
20 _pack = struct.pack
21 _unpack = struct.unpack
21 _unpack = struct.unpack
22 _compress = zlib.compress
22 _compress = zlib.compress
23 _decompress = zlib.decompress
23 _decompress = zlib.decompress
24 _sha = util.sha1
24 _sha = util.sha1
25
25
26 # revlog header flags
26 # revlog header flags
27 REVLOGV0 = 0
27 REVLOGV0 = 0
28 REVLOGNG = 1
28 REVLOGNG = 1
29 REVLOGNGINLINEDATA = (1 << 16)
29 REVLOGNGINLINEDATA = (1 << 16)
30 REVLOGSHALLOW = (1 << 17)
30 REVLOGSHALLOW = (1 << 17)
31 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
31 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
32 REVLOG_DEFAULT_FORMAT = REVLOGNG
32 REVLOG_DEFAULT_FORMAT = REVLOGNG
33 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
33 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
34 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGSHALLOW
34 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGSHALLOW
35
35
36 # revlog index flags
36 # revlog index flags
37 REVIDX_PARENTDELTA = 1
37 REVIDX_PARENTDELTA = 1
38 REVIDX_PUNCHED_FLAG = 2
38 REVIDX_PUNCHED_FLAG = 2
39 REVIDX_KNOWN_FLAGS = REVIDX_PUNCHED_FLAG | REVIDX_PARENTDELTA
39 REVIDX_KNOWN_FLAGS = REVIDX_PUNCHED_FLAG | REVIDX_PARENTDELTA
40
40
41 # amount of data read unconditionally, should be >= 4
41 # amount of data read unconditionally, should be >= 4
42 # when not inline: threshold for using lazy index
42 # when not inline: threshold for using lazy index
43 _prereadsize = 1048576
43 _prereadsize = 1048576
44 # max size of revlog with inline data
44 # max size of revlog with inline data
45 _maxinline = 131072
45 _maxinline = 131072
46
46
47 RevlogError = error.RevlogError
47 RevlogError = error.RevlogError
48 LookupError = error.LookupError
48 LookupError = error.LookupError
49
49
50 def getoffset(q):
50 def getoffset(q):
51 return int(q >> 16)
51 return int(q >> 16)
52
52
53 def gettype(q):
53 def gettype(q):
54 return int(q & 0xFFFF)
54 return int(q & 0xFFFF)
55
55
56 def offset_type(offset, type):
56 def offset_type(offset, type):
57 return long(long(offset) << 16 | type)
57 return long(long(offset) << 16 | type)
58
58
59 nullhash = _sha(nullid)
59 nullhash = _sha(nullid)
60
60
61 def hash(text, p1, p2):
61 def hash(text, p1, p2):
62 """generate a hash from the given text and its parent hashes
62 """generate a hash from the given text and its parent hashes
63
63
64 This hash combines both the current file contents and its history
64 This hash combines both the current file contents and its history
65 in a manner that makes it easy to distinguish nodes with the same
65 in a manner that makes it easy to distinguish nodes with the same
66 content in the revision graph.
66 content in the revision graph.
67 """
67 """
68 # As of now, if one of the parent node is null, p2 is null
68 # As of now, if one of the parent node is null, p2 is null
69 if p2 == nullid:
69 if p2 == nullid:
70 # deep copy of a hash is faster than creating one
70 # deep copy of a hash is faster than creating one
71 s = nullhash.copy()
71 s = nullhash.copy()
72 s.update(p1)
72 s.update(p1)
73 else:
73 else:
74 # none of the parent nodes are nullid
74 # none of the parent nodes are nullid
75 l = [p1, p2]
75 l = [p1, p2]
76 l.sort()
76 l.sort()
77 s = _sha(l[0])
77 s = _sha(l[0])
78 s.update(l[1])
78 s.update(l[1])
79 s.update(text)
79 s.update(text)
80 return s.digest()
80 return s.digest()
81
81
82 def compress(text):
82 def compress(text):
83 """ generate a possibly-compressed representation of text """
83 """ generate a possibly-compressed representation of text """
84 if not text:
84 if not text:
85 return ("", text)
85 return ("", text)
86 l = len(text)
86 l = len(text)
87 bin = None
87 bin = None
88 if l < 44:
88 if l < 44:
89 pass
89 pass
90 elif l > 1000000:
90 elif l > 1000000:
91 # zlib makes an internal copy, thus doubling memory usage for
91 # zlib makes an internal copy, thus doubling memory usage for
92 # large files, so lets do this in pieces
92 # large files, so lets do this in pieces
93 z = zlib.compressobj()
93 z = zlib.compressobj()
94 p = []
94 p = []
95 pos = 0
95 pos = 0
96 while pos < l:
96 while pos < l:
97 pos2 = pos + 2**20
97 pos2 = pos + 2**20
98 p.append(z.compress(text[pos:pos2]))
98 p.append(z.compress(text[pos:pos2]))
99 pos = pos2
99 pos = pos2
100 p.append(z.flush())
100 p.append(z.flush())
101 if sum(map(len, p)) < l:
101 if sum(map(len, p)) < l:
102 bin = "".join(p)
102 bin = "".join(p)
103 else:
103 else:
104 bin = _compress(text)
104 bin = _compress(text)
105 if bin is None or len(bin) > l:
105 if bin is None or len(bin) > l:
106 if text[0] == '\0':
106 if text[0] == '\0':
107 return ("", text)
107 return ("", text)
108 return ('u', text)
108 return ('u', text)
109 return ("", bin)
109 return ("", bin)
110
110
111 def decompress(bin):
111 def decompress(bin):
112 """ decompress the given input """
112 """ decompress the given input """
113 if not bin:
113 if not bin:
114 return bin
114 return bin
115 t = bin[0]
115 t = bin[0]
116 if t == '\0':
116 if t == '\0':
117 return bin
117 return bin
118 if t == 'x':
118 if t == 'x':
119 return _decompress(bin)
119 return _decompress(bin)
120 if t == 'u':
120 if t == 'u':
121 return bin[1:]
121 return bin[1:]
122 raise RevlogError(_("unknown compression type %r") % t)
122 raise RevlogError(_("unknown compression type %r") % t)
123
123
124 class lazyparser(object):
124 class lazyparser(object):
125 """
125 """
126 this class avoids the need to parse the entirety of large indices
126 this class avoids the need to parse the entirety of large indices
127 """
127 """
128
128
129 # lazyparser is not safe to use on windows if win32 extensions not
129 # lazyparser is not safe to use on windows if win32 extensions not
130 # available. it keeps file handle open, which make it not possible
130 # available. it keeps file handle open, which make it not possible
131 # to break hardlinks on local cloned repos.
131 # to break hardlinks on local cloned repos.
132
132
133 def __init__(self, dataf):
133 def __init__(self, dataf):
134 try:
134 try:
135 size = util.fstat(dataf).st_size
135 size = util.fstat(dataf).st_size
136 except AttributeError:
136 except AttributeError:
137 size = 0
137 size = 0
138 self.dataf = dataf
138 self.dataf = dataf
139 self.s = struct.calcsize(indexformatng)
139 self.s = struct.calcsize(indexformatng)
140 self.datasize = size
140 self.datasize = size
141 self.l = size // self.s
141 self.l = size // self.s
142 self.index = [None] * self.l
142 self.index = [None] * self.l
143 self.map = {nullid: nullrev}
143 self.map = {nullid: nullrev}
144 self.allmap = 0
144 self.allmap = 0
145 self.all = 0
145 self.all = 0
146 self.mapfind_count = 0
146 self.mapfind_count = 0
147
147
148 def loadmap(self):
148 def loadmap(self):
149 """
149 """
150 during a commit, we need to make sure the rev being added is
150 during a commit, we need to make sure the rev being added is
151 not a duplicate. This requires loading the entire index,
151 not a duplicate. This requires loading the entire index,
152 which is fairly slow. loadmap can load up just the node map,
152 which is fairly slow. loadmap can load up just the node map,
153 which takes much less time.
153 which takes much less time.
154 """
154 """
155 if self.allmap:
155 if self.allmap:
156 return
156 return
157 end = self.datasize
157 end = self.datasize
158 self.allmap = 1
158 self.allmap = 1
159 cur = 0
159 cur = 0
160 count = 0
160 count = 0
161 blocksize = self.s * 256
161 blocksize = self.s * 256
162 self.dataf.seek(0)
162 self.dataf.seek(0)
163 while cur < end:
163 while cur < end:
164 data = self.dataf.read(blocksize)
164 data = self.dataf.read(blocksize)
165 off = 0
165 off = 0
166 for x in xrange(256):
166 for x in xrange(256):
167 n = data[off + ngshaoffset:off + ngshaoffset + 20]
167 n = data[off + ngshaoffset:off + ngshaoffset + 20]
168 self.map[n] = count
168 self.map[n] = count
169 count += 1
169 count += 1
170 if count >= self.l:
170 if count >= self.l:
171 break
171 break
172 off += self.s
172 off += self.s
173 cur += blocksize
173 cur += blocksize
174
174
175 def loadblock(self, blockstart, blocksize, data=None):
175 def loadblock(self, blockstart, blocksize, data=None):
176 if self.all:
176 if self.all:
177 return
177 return
178 if data is None:
178 if data is None:
179 self.dataf.seek(blockstart)
179 self.dataf.seek(blockstart)
180 if blockstart + blocksize > self.datasize:
180 if blockstart + blocksize > self.datasize:
181 # the revlog may have grown since we've started running,
181 # the revlog may have grown since we've started running,
182 # but we don't have space in self.index for more entries.
182 # but we don't have space in self.index for more entries.
183 # limit blocksize so that we don't get too much data.
183 # limit blocksize so that we don't get too much data.
184 blocksize = max(self.datasize - blockstart, 0)
184 blocksize = max(self.datasize - blockstart, 0)
185 data = self.dataf.read(blocksize)
185 data = self.dataf.read(blocksize)
186 lend = len(data) // self.s
186 lend = len(data) // self.s
187 i = blockstart // self.s
187 i = blockstart // self.s
188 off = 0
188 off = 0
189 # lazyindex supports __delitem__
189 # lazyindex supports __delitem__
190 if lend > len(self.index) - i:
190 if lend > len(self.index) - i:
191 lend = len(self.index) - i
191 lend = len(self.index) - i
192 for x in xrange(lend):
192 for x in xrange(lend):
193 if self.index[i + x] is None:
193 if self.index[i + x] is None:
194 b = data[off : off + self.s]
194 b = data[off : off + self.s]
195 self.index[i + x] = b
195 self.index[i + x] = b
196 n = b[ngshaoffset:ngshaoffset + 20]
196 n = b[ngshaoffset:ngshaoffset + 20]
197 self.map[n] = i + x
197 self.map[n] = i + x
198 off += self.s
198 off += self.s
199
199
200 def findnode(self, node):
200 def findnode(self, node):
201 """search backwards through the index file for a specific node"""
201 """search backwards through the index file for a specific node"""
202 if self.allmap:
202 if self.allmap:
203 return None
203 return None
204
204
205 # hg log will cause many many searches for the manifest
205 # hg log will cause many many searches for the manifest
206 # nodes. After we get called a few times, just load the whole
206 # nodes. After we get called a few times, just load the whole
207 # thing.
207 # thing.
208 if self.mapfind_count > 8:
208 if self.mapfind_count > 8:
209 self.loadmap()
209 self.loadmap()
210 if node in self.map:
210 if node in self.map:
211 return node
211 return node
212 return None
212 return None
213 self.mapfind_count += 1
213 self.mapfind_count += 1
214 last = self.l - 1
214 last = self.l - 1
215 while self.index[last] != None:
215 while self.index[last] != None:
216 if last == 0:
216 if last == 0:
217 self.all = 1
217 self.all = 1
218 self.allmap = 1
218 self.allmap = 1
219 return None
219 return None
220 last -= 1
220 last -= 1
221 end = (last + 1) * self.s
221 end = (last + 1) * self.s
222 blocksize = self.s * 256
222 blocksize = self.s * 256
223 while end >= 0:
223 while end >= 0:
224 start = max(end - blocksize, 0)
224 start = max(end - blocksize, 0)
225 self.dataf.seek(start)
225 self.dataf.seek(start)
226 data = self.dataf.read(end - start)
226 data = self.dataf.read(end - start)
227 findend = end - start
227 findend = end - start
228 while True:
228 while True:
229 # we're searching backwards, so we have to make sure
229 # we're searching backwards, so we have to make sure
230 # we don't find a changeset where this node is a parent
230 # we don't find a changeset where this node is a parent
231 off = data.find(node, 0, findend)
231 off = data.find(node, 0, findend)
232 findend = off
232 findend = off
233 if off >= 0:
233 if off >= 0:
234 i = off / self.s
234 i = off / self.s
235 off = i * self.s
235 off = i * self.s
236 n = data[off + ngshaoffset:off + ngshaoffset + 20]
236 n = data[off + ngshaoffset:off + ngshaoffset + 20]
237 if n == node:
237 if n == node:
238 self.map[n] = i + start / self.s
238 self.map[n] = i + start / self.s
239 return node
239 return node
240 else:
240 else:
241 break
241 break
242 end -= blocksize
242 end -= blocksize
243 return None
243 return None
244
244
245 def loadindex(self, i=None, end=None):
245 def loadindex(self, i=None, end=None):
246 if self.all:
246 if self.all:
247 return
247 return
248 all = False
248 all = False
249 if i is None:
249 if i is None:
250 blockstart = 0
250 blockstart = 0
251 blocksize = (65536 / self.s) * self.s
251 blocksize = (65536 / self.s) * self.s
252 end = self.datasize
252 end = self.datasize
253 all = True
253 all = True
254 else:
254 else:
255 if end:
255 if end:
256 blockstart = i * self.s
256 blockstart = i * self.s
257 end = end * self.s
257 end = end * self.s
258 blocksize = end - blockstart
258 blocksize = end - blockstart
259 else:
259 else:
260 blockstart = (i & ~1023) * self.s
260 blockstart = (i & ~1023) * self.s
261 blocksize = self.s * 1024
261 blocksize = self.s * 1024
262 end = blockstart + blocksize
262 end = blockstart + blocksize
263 while blockstart < end:
263 while blockstart < end:
264 self.loadblock(blockstart, blocksize)
264 self.loadblock(blockstart, blocksize)
265 blockstart += blocksize
265 blockstart += blocksize
266 if all:
266 if all:
267 self.all = True
267 self.all = True
268
268
269 class lazyindex(object):
269 class lazyindex(object):
270 """a lazy version of the index array"""
270 """a lazy version of the index array"""
271 def __init__(self, parser):
271 def __init__(self, parser):
272 self.p = parser
272 self.p = parser
273 def __len__(self):
273 def __len__(self):
274 return len(self.p.index)
274 return len(self.p.index)
275 def load(self, pos):
275 def load(self, pos):
276 if pos < 0:
276 if pos < 0:
277 pos += len(self.p.index)
277 pos += len(self.p.index)
278 self.p.loadindex(pos)
278 self.p.loadindex(pos)
279 return self.p.index[pos]
279 return self.p.index[pos]
280 def __getitem__(self, pos):
280 def __getitem__(self, pos):
281 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
281 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
282 def __setitem__(self, pos, item):
282 def __setitem__(self, pos, item):
283 self.p.index[pos] = _pack(indexformatng, *item)
283 self.p.index[pos] = _pack(indexformatng, *item)
284 def __delitem__(self, pos):
284 def __delitem__(self, pos):
285 del self.p.index[pos]
285 del self.p.index[pos]
286 def insert(self, pos, e):
286 def insert(self, pos, e):
287 self.p.index.insert(pos, _pack(indexformatng, *e))
287 self.p.index.insert(pos, _pack(indexformatng, *e))
288 def append(self, e):
288 def append(self, e):
289 self.p.index.append(_pack(indexformatng, *e))
289 self.p.index.append(_pack(indexformatng, *e))
290
290
291 class lazymap(object):
291 class lazymap(object):
292 """a lazy version of the node map"""
292 """a lazy version of the node map"""
293 def __init__(self, parser):
293 def __init__(self, parser):
294 self.p = parser
294 self.p = parser
295 def load(self, key):
295 def load(self, key):
296 n = self.p.findnode(key)
296 n = self.p.findnode(key)
297 if n is None:
297 if n is None:
298 raise KeyError(key)
298 raise KeyError(key)
299 def __contains__(self, key):
299 def __contains__(self, key):
300 if key in self.p.map:
300 if key in self.p.map:
301 return True
301 return True
302 self.p.loadmap()
302 self.p.loadmap()
303 return key in self.p.map
303 return key in self.p.map
304 def __iter__(self):
304 def __iter__(self):
305 yield nullid
305 yield nullid
306 for i, ret in enumerate(self.p.index):
306 for i, ret in enumerate(self.p.index):
307 if not ret:
307 if not ret:
308 self.p.loadindex(i)
308 self.p.loadindex(i)
309 ret = self.p.index[i]
309 ret = self.p.index[i]
310 if isinstance(ret, str):
310 if isinstance(ret, str):
311 ret = _unpack(indexformatng, ret)
311 ret = _unpack(indexformatng, ret)
312 yield ret[7]
312 yield ret[7]
313 def __getitem__(self, key):
313 def __getitem__(self, key):
314 try:
314 try:
315 return self.p.map[key]
315 return self.p.map[key]
316 except KeyError:
316 except KeyError:
317 try:
317 try:
318 self.load(key)
318 self.load(key)
319 return self.p.map[key]
319 return self.p.map[key]
320 except KeyError:
320 except KeyError:
321 raise KeyError("node " + hex(key))
321 raise KeyError("node " + hex(key))
322 def __setitem__(self, key, val):
322 def __setitem__(self, key, val):
323 self.p.map[key] = val
323 self.p.map[key] = val
324 def __delitem__(self, key):
324 def __delitem__(self, key):
325 del self.p.map[key]
325 del self.p.map[key]
326
326
327 indexformatv0 = ">4l20s20s20s"
327 indexformatv0 = ">4l20s20s20s"
328 v0shaoffset = 56
328 v0shaoffset = 56
329
329
330 class revlogoldio(object):
330 class revlogoldio(object):
331 def __init__(self):
331 def __init__(self):
332 self.size = struct.calcsize(indexformatv0)
332 self.size = struct.calcsize(indexformatv0)
333
333
334 def parseindex(self, fp, data, inline):
334 def parseindex(self, fp, data, inline):
335 s = self.size
335 s = self.size
336 index = []
336 index = []
337 nodemap = {nullid: nullrev}
337 nodemap = {nullid: nullrev}
338 n = off = 0
338 n = off = 0
339 if len(data) == _prereadsize:
339 if len(data) == _prereadsize:
340 data += fp.read() # read the rest
340 data += fp.read() # read the rest
341 l = len(data)
341 l = len(data)
342 while off + s <= l:
342 while off + s <= l:
343 cur = data[off:off + s]
343 cur = data[off:off + s]
344 off += s
344 off += s
345 e = _unpack(indexformatv0, cur)
345 e = _unpack(indexformatv0, cur)
346 # transform to revlogv1 format
346 # transform to revlogv1 format
347 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
347 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
348 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
348 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
349 index.append(e2)
349 index.append(e2)
350 nodemap[e[6]] = n
350 nodemap[e[6]] = n
351 n += 1
351 n += 1
352
352
353 return index, nodemap, None
353 return index, nodemap, None
354
354
355 def packentry(self, entry, node, version, rev):
355 def packentry(self, entry, node, version, rev):
356 if gettype(entry[0]):
356 if gettype(entry[0]):
357 raise RevlogError(_("index entry flags need RevlogNG"))
357 raise RevlogError(_("index entry flags need RevlogNG"))
358 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
358 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
359 node(entry[5]), node(entry[6]), entry[7])
359 node(entry[5]), node(entry[6]), entry[7])
360 return _pack(indexformatv0, *e2)
360 return _pack(indexformatv0, *e2)
361
361
362 # index ng:
362 # index ng:
363 # 6 bytes: offset
363 # 6 bytes: offset
364 # 2 bytes: flags
364 # 2 bytes: flags
365 # 4 bytes: compressed length
365 # 4 bytes: compressed length
366 # 4 bytes: uncompressed length
366 # 4 bytes: uncompressed length
367 # 4 bytes: base rev
367 # 4 bytes: base rev
368 # 4 bytes: link rev
368 # 4 bytes: link rev
369 # 4 bytes: parent 1 rev
369 # 4 bytes: parent 1 rev
370 # 4 bytes: parent 2 rev
370 # 4 bytes: parent 2 rev
371 # 32 bytes: nodeid
371 # 32 bytes: nodeid
372 indexformatng = ">Qiiiiii20s12x"
372 indexformatng = ">Qiiiiii20s12x"
373 ngshaoffset = 32
373 ngshaoffset = 32
374 versionformat = ">I"
374 versionformat = ">I"
375
375
376 class revlogio(object):
376 class revlogio(object):
377 def __init__(self):
377 def __init__(self):
378 self.size = struct.calcsize(indexformatng)
378 self.size = struct.calcsize(indexformatng)
379
379
380 def parseindex(self, fp, data, inline):
380 def parseindex(self, fp, data, inline):
381 if len(data) == _prereadsize:
381 if len(data) == _prereadsize:
382 if util.openhardlinks() and not inline:
382 if util.openhardlinks() and not inline:
383 # big index, let's parse it on demand
383 # big index, let's parse it on demand
384 parser = lazyparser(fp)
384 parser = lazyparser(fp)
385 index = lazyindex(parser)
385 index = lazyindex(parser)
386 nodemap = lazymap(parser)
386 nodemap = lazymap(parser)
387 e = list(index[0])
387 e = list(index[0])
388 type = gettype(e[0])
388 type = gettype(e[0])
389 e[0] = offset_type(0, type)
389 e[0] = offset_type(0, type)
390 index[0] = e
390 index[0] = e
391 return index, nodemap, None
391 return index, nodemap, None
392 else:
392 else:
393 data += fp.read()
393 data += fp.read()
394
394
395 # call the C implementation to parse the index data
395 # call the C implementation to parse the index data
396 index, nodemap, cache = parsers.parse_index(data, inline)
396 index, nodemap, cache = parsers.parse_index(data, inline)
397 return index, nodemap, cache
397 return index, nodemap, cache
398
398
399 def packentry(self, entry, node, version, rev):
399 def packentry(self, entry, node, version, rev):
400 p = _pack(indexformatng, *entry)
400 p = _pack(indexformatng, *entry)
401 if rev == 0:
401 if rev == 0:
402 p = _pack(versionformat, version) + p[4:]
402 p = _pack(versionformat, version) + p[4:]
403 return p
403 return p
404
404
405 class revlog(object):
405 class revlog(object):
406 """
406 """
407 the underlying revision storage object
407 the underlying revision storage object
408
408
409 A revlog consists of two parts, an index and the revision data.
409 A revlog consists of two parts, an index and the revision data.
410
410
411 The index is a file with a fixed record size containing
411 The index is a file with a fixed record size containing
412 information on each revision, including its nodeid (hash), the
412 information on each revision, including its nodeid (hash), the
413 nodeids of its parents, the position and offset of its data within
413 nodeids of its parents, the position and offset of its data within
414 the data file, and the revision it's based on. Finally, each entry
414 the data file, and the revision it's based on. Finally, each entry
415 contains a linkrev entry that can serve as a pointer to external
415 contains a linkrev entry that can serve as a pointer to external
416 data.
416 data.
417
417
418 The revision data itself is a linear collection of data chunks.
418 The revision data itself is a linear collection of data chunks.
419 Each chunk represents a revision and is usually represented as a
419 Each chunk represents a revision and is usually represented as a
420 delta against the previous chunk. To bound lookup time, runs of
420 delta against the previous chunk. To bound lookup time, runs of
421 deltas are limited to about 2 times the length of the original
421 deltas are limited to about 2 times the length of the original
422 version data. This makes retrieval of a version proportional to
422 version data. This makes retrieval of a version proportional to
423 its size, or O(1) relative to the number of revisions.
423 its size, or O(1) relative to the number of revisions.
424
424
425 Both pieces of the revlog are written to in an append-only
425 Both pieces of the revlog are written to in an append-only
426 fashion, which means we never need to rewrite a file to insert or
426 fashion, which means we never need to rewrite a file to insert or
427 remove data, and can use some simple techniques to avoid the need
427 remove data, and can use some simple techniques to avoid the need
428 for locking while reading.
428 for locking while reading.
429 """
429 """
430 def __init__(self, opener, indexfile, shallowroot=None):
430 def __init__(self, opener, indexfile, shallowroot=None):
431 """
431 """
432 create a revlog object
432 create a revlog object
433
433
434 opener is a function that abstracts the file opening operation
434 opener is a function that abstracts the file opening operation
435 and can be used to implement COW semantics or the like.
435 and can be used to implement COW semantics or the like.
436 """
436 """
437 self.indexfile = indexfile
437 self.indexfile = indexfile
438 self.datafile = indexfile[:-2] + ".d"
438 self.datafile = indexfile[:-2] + ".d"
439 self.opener = opener
439 self.opener = opener
440 self._cache = None
440 self._cache = None
441 self._chunkcache = (0, '')
441 self._chunkcache = (0, '')
442 self.nodemap = {nullid: nullrev}
442 self.nodemap = {nullid: nullrev}
443 self.index = []
443 self.index = []
444 self._shallowroot = shallowroot
444 self._shallowroot = shallowroot
445 self._parentdelta = 0
445 self._parentdelta = 0
446
446
447 v = REVLOG_DEFAULT_VERSION
447 v = REVLOG_DEFAULT_VERSION
448 if hasattr(opener, 'options') and 'defversion' in opener.options:
448 if hasattr(opener, 'options') and 'defversion' in opener.options:
449 v = opener.options['defversion']
449 v = opener.options['defversion']
450 if v & REVLOGNG:
450 if v & REVLOGNG:
451 v |= REVLOGNGINLINEDATA
451 v |= REVLOGNGINLINEDATA
452 if v & REVLOGNG and 'parentdelta' in opener.options:
452 if v & REVLOGNG and 'parentdelta' in opener.options:
453 self._parentdelta = 1
453 self._parentdelta = 1
454
454
455 if shallowroot:
455 if shallowroot:
456 v |= REVLOGSHALLOW
456 v |= REVLOGSHALLOW
457
457
458 i = ''
458 i = ''
459 try:
459 try:
460 f = self.opener(self.indexfile)
460 f = self.opener(self.indexfile)
461 if "nonlazy" in getattr(self.opener, 'options', {}):
461 if "nonlazy" in getattr(self.opener, 'options', {}):
462 i = f.read()
462 i = f.read()
463 else:
463 else:
464 i = f.read(_prereadsize)
464 i = f.read(_prereadsize)
465 if len(i) > 0:
465 if len(i) > 0:
466 v = struct.unpack(versionformat, i[:4])[0]
466 v = struct.unpack(versionformat, i[:4])[0]
467 except IOError, inst:
467 except IOError, inst:
468 if inst.errno != errno.ENOENT:
468 if inst.errno != errno.ENOENT:
469 raise
469 raise
470
470
471 self.version = v
471 self.version = v
472 self._inline = v & REVLOGNGINLINEDATA
472 self._inline = v & REVLOGNGINLINEDATA
473 self._shallow = v & REVLOGSHALLOW
473 self._shallow = v & REVLOGSHALLOW
474 flags = v & ~0xFFFF
474 flags = v & ~0xFFFF
475 fmt = v & 0xFFFF
475 fmt = v & 0xFFFF
476 if fmt == REVLOGV0 and flags:
476 if fmt == REVLOGV0 and flags:
477 raise RevlogError(_("index %s unknown flags %#04x for format v0")
477 raise RevlogError(_("index %s unknown flags %#04x for format v0")
478 % (self.indexfile, flags >> 16))
478 % (self.indexfile, flags >> 16))
479 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
479 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
480 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
480 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
481 % (self.indexfile, flags >> 16))
481 % (self.indexfile, flags >> 16))
482 elif fmt > REVLOGNG:
482 elif fmt > REVLOGNG:
483 raise RevlogError(_("index %s unknown format %d")
483 raise RevlogError(_("index %s unknown format %d")
484 % (self.indexfile, fmt))
484 % (self.indexfile, fmt))
485
485
486 self._io = revlogio()
486 self._io = revlogio()
487 if self.version == REVLOGV0:
487 if self.version == REVLOGV0:
488 self._io = revlogoldio()
488 self._io = revlogoldio()
489 if i:
489 if i:
490 try:
490 try:
491 d = self._io.parseindex(f, i, self._inline)
491 d = self._io.parseindex(f, i, self._inline)
492 except (ValueError, IndexError):
492 except (ValueError, IndexError):
493 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
493 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
494 self.index, self.nodemap, self._chunkcache = d
494 self.index, self.nodemap, self._chunkcache = d
495 if not self._chunkcache:
495 if not self._chunkcache:
496 self._chunkclear()
496 self._chunkclear()
497
497
498 # add the magic null revision at -1 (if it hasn't been done already)
498 # add the magic null revision at -1 (if it hasn't been done already)
499 if (self.index == [] or isinstance(self.index, lazyindex) or
499 if (self.index == [] or isinstance(self.index, lazyindex) or
500 self.index[-1][7] != nullid) :
500 self.index[-1][7] != nullid) :
501 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
501 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
502
502
503 def _loadindex(self, start, end):
503 def _loadindex(self, start, end):
504 """load a block of indexes all at once from the lazy parser"""
504 """load a block of indexes all at once from the lazy parser"""
505 if isinstance(self.index, lazyindex):
505 if isinstance(self.index, lazyindex):
506 self.index.p.loadindex(start, end)
506 self.index.p.loadindex(start, end)
507
507
508 def _loadindexmap(self):
508 def _loadindexmap(self):
509 """loads both the map and the index from the lazy parser"""
509 """loads both the map and the index from the lazy parser"""
510 if isinstance(self.index, lazyindex):
510 if isinstance(self.index, lazyindex):
511 p = self.index.p
511 p = self.index.p
512 p.loadindex()
512 p.loadindex()
513 self.nodemap = p.map
513 self.nodemap = p.map
514
514
515 def _loadmap(self):
515 def _loadmap(self):
516 """loads the map from the lazy parser"""
516 """loads the map from the lazy parser"""
517 if isinstance(self.nodemap, lazymap):
517 if isinstance(self.nodemap, lazymap):
518 self.nodemap.p.loadmap()
518 self.nodemap.p.loadmap()
519 self.nodemap = self.nodemap.p.map
519 self.nodemap = self.nodemap.p.map
520
520
521 def tip(self):
521 def tip(self):
522 return self.node(len(self.index) - 2)
522 return self.node(len(self.index) - 2)
523 def __len__(self):
523 def __len__(self):
524 return len(self.index) - 1
524 return len(self.index) - 1
525 def __iter__(self):
525 def __iter__(self):
526 for i in xrange(len(self)):
526 for i in xrange(len(self)):
527 yield i
527 yield i
528 def rev(self, node):
528 def rev(self, node):
529 try:
529 try:
530 return self.nodemap[node]
530 return self.nodemap[node]
531 except KeyError:
531 except KeyError:
532 raise LookupError(node, self.indexfile, _('no node'))
532 raise LookupError(node, self.indexfile, _('no node'))
533 def node(self, rev):
533 def node(self, rev):
534 return self.index[rev][7]
534 return self.index[rev][7]
535 def linkrev(self, rev):
535 def linkrev(self, rev):
536 return self.index[rev][4]
536 return self.index[rev][4]
537 def parents(self, node):
537 def parents(self, node):
538 i = self.index
538 i = self.index
539 d = i[self.rev(node)]
539 d = i[self.rev(node)]
540 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
540 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
541 def parentrevs(self, rev):
541 def parentrevs(self, rev):
542 return self.index[rev][5:7]
542 return self.index[rev][5:7]
543 def start(self, rev):
543 def start(self, rev):
544 return int(self.index[rev][0] >> 16)
544 return int(self.index[rev][0] >> 16)
545 def end(self, rev):
545 def end(self, rev):
546 return self.start(rev) + self.length(rev)
546 return self.start(rev) + self.length(rev)
547 def length(self, rev):
547 def length(self, rev):
548 return self.index[rev][1]
548 return self.index[rev][1]
549 def base(self, rev):
549 def base(self, rev):
550 return self.index[rev][3]
550 return self.index[rev][3]
551 def flags(self, rev):
551 def flags(self, rev):
552 return self.index[rev][0] & 0xFFFF
552 return self.index[rev][0] & 0xFFFF
553
553
554 def size(self, rev):
554 def size(self, rev):
555 """return the length of the uncompressed text for a given revision"""
555 """return the length of the uncompressed text for a given revision"""
556 l = self.index[rev][2]
556 l = self.index[rev][2]
557 if l >= 0:
557 if l >= 0:
558 return l
558 return l
559
559
560 t = self.revision(self.node(rev))
560 t = self.revision(self.node(rev))
561 return len(t)
561 return len(t)
562
562
563 def reachable(self, node, stop=None):
563 def reachable(self, node, stop=None):
564 """return the set of all nodes ancestral to a given node, including
564 """return the set of all nodes ancestral to a given node, including
565 the node itself, stopping when stop is matched"""
565 the node itself, stopping when stop is matched"""
566 reachable = set((node,))
566 reachable = set((node,))
567 visit = [node]
567 visit = [node]
568 if stop:
568 if stop:
569 stopn = self.rev(stop)
569 stopn = self.rev(stop)
570 else:
570 else:
571 stopn = 0
571 stopn = 0
572 while visit:
572 while visit:
573 n = visit.pop(0)
573 n = visit.pop(0)
574 if n == stop:
574 if n == stop:
575 continue
575 continue
576 if n == nullid:
576 if n == nullid:
577 continue
577 continue
578 for p in self.parents(n):
578 for p in self.parents(n):
579 if self.rev(p) < stopn:
579 if self.rev(p) < stopn:
580 continue
580 continue
581 if p not in reachable:
581 if p not in reachable:
582 reachable.add(p)
582 reachable.add(p)
583 visit.append(p)
583 visit.append(p)
584 return reachable
584 return reachable
585
585
586 def ancestors(self, *revs):
586 def ancestors(self, *revs):
587 """Generate the ancestors of 'revs' in reverse topological order.
587 """Generate the ancestors of 'revs' in reverse topological order.
588
588
589 Yield a sequence of revision numbers starting with the parents
589 Yield a sequence of revision numbers starting with the parents
590 of each revision in revs, i.e., each revision is *not* considered
590 of each revision in revs, i.e., each revision is *not* considered
591 an ancestor of itself. Results are in breadth-first order:
591 an ancestor of itself. Results are in breadth-first order:
592 parents of each rev in revs, then parents of those, etc. Result
592 parents of each rev in revs, then parents of those, etc. Result
593 does not include the null revision."""
593 does not include the null revision."""
594 visit = list(revs)
594 visit = list(revs)
595 seen = set([nullrev])
595 seen = set([nullrev])
596 while visit:
596 while visit:
597 for parent in self.parentrevs(visit.pop(0)):
597 for parent in self.parentrevs(visit.pop(0)):
598 if parent not in seen:
598 if parent not in seen:
599 visit.append(parent)
599 visit.append(parent)
600 seen.add(parent)
600 seen.add(parent)
601 yield parent
601 yield parent
602
602
603 def descendants(self, *revs):
603 def descendants(self, *revs):
604 """Generate the descendants of 'revs' in revision order.
604 """Generate the descendants of 'revs' in revision order.
605
605
606 Yield a sequence of revision numbers starting with a child of
606 Yield a sequence of revision numbers starting with a child of
607 some rev in revs, i.e., each revision is *not* considered a
607 some rev in revs, i.e., each revision is *not* considered a
608 descendant of itself. Results are ordered by revision number (a
608 descendant of itself. Results are ordered by revision number (a
609 topological sort)."""
609 topological sort)."""
610 seen = set(revs)
610 seen = set(revs)
611 for i in xrange(min(revs) + 1, len(self)):
611 for i in xrange(min(revs) + 1, len(self)):
612 for x in self.parentrevs(i):
612 for x in self.parentrevs(i):
613 if x != nullrev and x in seen:
613 if x != nullrev and x in seen:
614 seen.add(i)
614 seen.add(i)
615 yield i
615 yield i
616 break
616 break
617
617
618 def findmissing(self, common=None, heads=None):
618 def findmissing(self, common=None, heads=None):
619 """Return the ancestors of heads that are not ancestors of common.
619 """Return the ancestors of heads that are not ancestors of common.
620
620
621 More specifically, return a list of nodes N such that every N
621 More specifically, return a list of nodes N such that every N
622 satisfies the following constraints:
622 satisfies the following constraints:
623
623
624 1. N is an ancestor of some node in 'heads'
624 1. N is an ancestor of some node in 'heads'
625 2. N is not an ancestor of any node in 'common'
625 2. N is not an ancestor of any node in 'common'
626
626
627 The list is sorted by revision number, meaning it is
627 The list is sorted by revision number, meaning it is
628 topologically sorted.
628 topologically sorted.
629
629
630 'heads' and 'common' are both lists of node IDs. If heads is
630 'heads' and 'common' are both lists of node IDs. If heads is
631 not supplied, uses all of the revlog's heads. If common is not
631 not supplied, uses all of the revlog's heads. If common is not
632 supplied, uses nullid."""
632 supplied, uses nullid."""
633 if common is None:
633 if common is None:
634 common = [nullid]
634 common = [nullid]
635 if heads is None:
635 if heads is None:
636 heads = self.heads()
636 heads = self.heads()
637
637
638 common = [self.rev(n) for n in common]
638 common = [self.rev(n) for n in common]
639 heads = [self.rev(n) for n in heads]
639 heads = [self.rev(n) for n in heads]
640
640
641 # we want the ancestors, but inclusive
641 # we want the ancestors, but inclusive
642 has = set(self.ancestors(*common))
642 has = set(self.ancestors(*common))
643 has.add(nullrev)
643 has.add(nullrev)
644 has.update(common)
644 has.update(common)
645
645
646 # take all ancestors from heads that aren't in has
646 # take all ancestors from heads that aren't in has
647 missing = set()
647 missing = set()
648 visit = [r for r in heads if r not in has]
648 visit = [r for r in heads if r not in has]
649 while visit:
649 while visit:
650 r = visit.pop(0)
650 r = visit.pop(0)
651 if r in missing:
651 if r in missing:
652 continue
652 continue
653 else:
653 else:
654 missing.add(r)
654 missing.add(r)
655 for p in self.parentrevs(r):
655 for p in self.parentrevs(r):
656 if p not in has:
656 if p not in has:
657 visit.append(p)
657 visit.append(p)
658 missing = list(missing)
658 missing = list(missing)
659 missing.sort()
659 missing.sort()
660 return [self.node(r) for r in missing]
660 return [self.node(r) for r in missing]
661
661
662 def nodesbetween(self, roots=None, heads=None):
662 def nodesbetween(self, roots=None, heads=None):
663 """Return a topological path from 'roots' to 'heads'.
663 """Return a topological path from 'roots' to 'heads'.
664
664
665 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
665 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
666 topologically sorted list of all nodes N that satisfy both of
666 topologically sorted list of all nodes N that satisfy both of
667 these constraints:
667 these constraints:
668
668
669 1. N is a descendant of some node in 'roots'
669 1. N is a descendant of some node in 'roots'
670 2. N is an ancestor of some node in 'heads'
670 2. N is an ancestor of some node in 'heads'
671
671
672 Every node is considered to be both a descendant and an ancestor
672 Every node is considered to be both a descendant and an ancestor
673 of itself, so every reachable node in 'roots' and 'heads' will be
673 of itself, so every reachable node in 'roots' and 'heads' will be
674 included in 'nodes'.
674 included in 'nodes'.
675
675
676 'outroots' is the list of reachable nodes in 'roots', i.e., the
676 'outroots' is the list of reachable nodes in 'roots', i.e., the
677 subset of 'roots' that is returned in 'nodes'. Likewise,
677 subset of 'roots' that is returned in 'nodes'. Likewise,
678 'outheads' is the subset of 'heads' that is also in 'nodes'.
678 'outheads' is the subset of 'heads' that is also in 'nodes'.
679
679
680 'roots' and 'heads' are both lists of node IDs. If 'roots' is
680 'roots' and 'heads' are both lists of node IDs. If 'roots' is
681 unspecified, uses nullid as the only root. If 'heads' is
681 unspecified, uses nullid as the only root. If 'heads' is
682 unspecified, uses list of all of the revlog's heads."""
682 unspecified, uses list of all of the revlog's heads."""
683 nonodes = ([], [], [])
683 nonodes = ([], [], [])
684 if roots is not None:
684 if roots is not None:
685 roots = list(roots)
685 roots = list(roots)
686 if not roots:
686 if not roots:
687 return nonodes
687 return nonodes
688 lowestrev = min([self.rev(n) for n in roots])
688 lowestrev = min([self.rev(n) for n in roots])
689 else:
689 else:
690 roots = [nullid] # Everybody's a descendent of nullid
690 roots = [nullid] # Everybody's a descendent of nullid
691 lowestrev = nullrev
691 lowestrev = nullrev
692 if (lowestrev == nullrev) and (heads is None):
692 if (lowestrev == nullrev) and (heads is None):
693 # We want _all_ the nodes!
693 # We want _all_ the nodes!
694 return ([self.node(r) for r in self], [nullid], list(self.heads()))
694 return ([self.node(r) for r in self], [nullid], list(self.heads()))
695 if heads is None:
695 if heads is None:
696 # All nodes are ancestors, so the latest ancestor is the last
696 # All nodes are ancestors, so the latest ancestor is the last
697 # node.
697 # node.
698 highestrev = len(self) - 1
698 highestrev = len(self) - 1
699 # Set ancestors to None to signal that every node is an ancestor.
699 # Set ancestors to None to signal that every node is an ancestor.
700 ancestors = None
700 ancestors = None
701 # Set heads to an empty dictionary for later discovery of heads
701 # Set heads to an empty dictionary for later discovery of heads
702 heads = {}
702 heads = {}
703 else:
703 else:
704 heads = list(heads)
704 heads = list(heads)
705 if not heads:
705 if not heads:
706 return nonodes
706 return nonodes
707 ancestors = set()
707 ancestors = set()
708 # Turn heads into a dictionary so we can remove 'fake' heads.
708 # Turn heads into a dictionary so we can remove 'fake' heads.
709 # Also, later we will be using it to filter out the heads we can't
709 # Also, later we will be using it to filter out the heads we can't
710 # find from roots.
710 # find from roots.
711 heads = dict.fromkeys(heads, 0)
711 heads = dict.fromkeys(heads, 0)
712 # Start at the top and keep marking parents until we're done.
712 # Start at the top and keep marking parents until we're done.
713 nodestotag = set(heads)
713 nodestotag = set(heads)
714 # Remember where the top was so we can use it as a limit later.
714 # Remember where the top was so we can use it as a limit later.
715 highestrev = max([self.rev(n) for n in nodestotag])
715 highestrev = max([self.rev(n) for n in nodestotag])
716 while nodestotag:
716 while nodestotag:
717 # grab a node to tag
717 # grab a node to tag
718 n = nodestotag.pop()
718 n = nodestotag.pop()
719 # Never tag nullid
719 # Never tag nullid
720 if n == nullid:
720 if n == nullid:
721 continue
721 continue
722 # A node's revision number represents its place in a
722 # A node's revision number represents its place in a
723 # topologically sorted list of nodes.
723 # topologically sorted list of nodes.
724 r = self.rev(n)
724 r = self.rev(n)
725 if r >= lowestrev:
725 if r >= lowestrev:
726 if n not in ancestors:
726 if n not in ancestors:
727 # If we are possibly a descendent of one of the roots
727 # If we are possibly a descendent of one of the roots
728 # and we haven't already been marked as an ancestor
728 # and we haven't already been marked as an ancestor
729 ancestors.add(n) # Mark as ancestor
729 ancestors.add(n) # Mark as ancestor
730 # Add non-nullid parents to list of nodes to tag.
730 # Add non-nullid parents to list of nodes to tag.
731 nodestotag.update([p for p in self.parents(n) if
731 nodestotag.update([p for p in self.parents(n) if
732 p != nullid])
732 p != nullid])
733 elif n in heads: # We've seen it before, is it a fake head?
733 elif n in heads: # We've seen it before, is it a fake head?
734 # So it is, real heads should not be the ancestors of
734 # So it is, real heads should not be the ancestors of
735 # any other heads.
735 # any other heads.
736 heads.pop(n)
736 heads.pop(n)
737 if not ancestors:
737 if not ancestors:
738 return nonodes
738 return nonodes
739 # Now that we have our set of ancestors, we want to remove any
739 # Now that we have our set of ancestors, we want to remove any
740 # roots that are not ancestors.
740 # roots that are not ancestors.
741
741
742 # If one of the roots was nullid, everything is included anyway.
742 # If one of the roots was nullid, everything is included anyway.
743 if lowestrev > nullrev:
743 if lowestrev > nullrev:
744 # But, since we weren't, let's recompute the lowest rev to not
744 # But, since we weren't, let's recompute the lowest rev to not
745 # include roots that aren't ancestors.
745 # include roots that aren't ancestors.
746
746
747 # Filter out roots that aren't ancestors of heads
747 # Filter out roots that aren't ancestors of heads
748 roots = [n for n in roots if n in ancestors]
748 roots = [n for n in roots if n in ancestors]
749 # Recompute the lowest revision
749 # Recompute the lowest revision
750 if roots:
750 if roots:
751 lowestrev = min([self.rev(n) for n in roots])
751 lowestrev = min([self.rev(n) for n in roots])
752 else:
752 else:
753 # No more roots? Return empty list
753 # No more roots? Return empty list
754 return nonodes
754 return nonodes
755 else:
755 else:
756 # We are descending from nullid, and don't need to care about
756 # We are descending from nullid, and don't need to care about
757 # any other roots.
757 # any other roots.
758 lowestrev = nullrev
758 lowestrev = nullrev
759 roots = [nullid]
759 roots = [nullid]
760 # Transform our roots list into a set.
760 # Transform our roots list into a set.
761 descendents = set(roots)
761 descendents = set(roots)
762 # Also, keep the original roots so we can filter out roots that aren't
762 # Also, keep the original roots so we can filter out roots that aren't
763 # 'real' roots (i.e. are descended from other roots).
763 # 'real' roots (i.e. are descended from other roots).
764 roots = descendents.copy()
764 roots = descendents.copy()
765 # Our topologically sorted list of output nodes.
765 # Our topologically sorted list of output nodes.
766 orderedout = []
766 orderedout = []
767 # Don't start at nullid since we don't want nullid in our output list,
767 # Don't start at nullid since we don't want nullid in our output list,
768 # and if nullid shows up in descedents, empty parents will look like
768 # and if nullid shows up in descedents, empty parents will look like
769 # they're descendents.
769 # they're descendents.
770 for r in xrange(max(lowestrev, 0), highestrev + 1):
770 for r in xrange(max(lowestrev, 0), highestrev + 1):
771 n = self.node(r)
771 n = self.node(r)
772 isdescendent = False
772 isdescendent = False
773 if lowestrev == nullrev: # Everybody is a descendent of nullid
773 if lowestrev == nullrev: # Everybody is a descendent of nullid
774 isdescendent = True
774 isdescendent = True
775 elif n in descendents:
775 elif n in descendents:
776 # n is already a descendent
776 # n is already a descendent
777 isdescendent = True
777 isdescendent = True
778 # This check only needs to be done here because all the roots
778 # This check only needs to be done here because all the roots
779 # will start being marked is descendents before the loop.
779 # will start being marked is descendents before the loop.
780 if n in roots:
780 if n in roots:
781 # If n was a root, check if it's a 'real' root.
781 # If n was a root, check if it's a 'real' root.
782 p = tuple(self.parents(n))
782 p = tuple(self.parents(n))
783 # If any of its parents are descendents, it's not a root.
783 # If any of its parents are descendents, it's not a root.
784 if (p[0] in descendents) or (p[1] in descendents):
784 if (p[0] in descendents) or (p[1] in descendents):
785 roots.remove(n)
785 roots.remove(n)
786 else:
786 else:
787 p = tuple(self.parents(n))
787 p = tuple(self.parents(n))
788 # A node is a descendent if either of its parents are
788 # A node is a descendent if either of its parents are
789 # descendents. (We seeded the dependents list with the roots
789 # descendents. (We seeded the dependents list with the roots
790 # up there, remember?)
790 # up there, remember?)
791 if (p[0] in descendents) or (p[1] in descendents):
791 if (p[0] in descendents) or (p[1] in descendents):
792 descendents.add(n)
792 descendents.add(n)
793 isdescendent = True
793 isdescendent = True
794 if isdescendent and ((ancestors is None) or (n in ancestors)):
794 if isdescendent and ((ancestors is None) or (n in ancestors)):
795 # Only include nodes that are both descendents and ancestors.
795 # Only include nodes that are both descendents and ancestors.
796 orderedout.append(n)
796 orderedout.append(n)
797 if (ancestors is not None) and (n in heads):
797 if (ancestors is not None) and (n in heads):
798 # We're trying to figure out which heads are reachable
798 # We're trying to figure out which heads are reachable
799 # from roots.
799 # from roots.
800 # Mark this head as having been reached
800 # Mark this head as having been reached
801 heads[n] = 1
801 heads[n] = 1
802 elif ancestors is None:
802 elif ancestors is None:
803 # Otherwise, we're trying to discover the heads.
803 # Otherwise, we're trying to discover the heads.
804 # Assume this is a head because if it isn't, the next step
804 # Assume this is a head because if it isn't, the next step
805 # will eventually remove it.
805 # will eventually remove it.
806 heads[n] = 1
806 heads[n] = 1
807 # But, obviously its parents aren't.
807 # But, obviously its parents aren't.
808 for p in self.parents(n):
808 for p in self.parents(n):
809 heads.pop(p, None)
809 heads.pop(p, None)
810 heads = [n for n in heads.iterkeys() if heads[n] != 0]
810 heads = [n for n in heads.iterkeys() if heads[n] != 0]
811 roots = list(roots)
811 roots = list(roots)
812 assert orderedout
812 assert orderedout
813 assert roots
813 assert roots
814 assert heads
814 assert heads
815 return (orderedout, roots, heads)
815 return (orderedout, roots, heads)
816
816
817 def heads(self, start=None, stop=None):
817 def heads(self, start=None, stop=None):
818 """return the list of all nodes that have no children
818 """return the list of all nodes that have no children
819
819
820 if start is specified, only heads that are descendants of
820 if start is specified, only heads that are descendants of
821 start will be returned
821 start will be returned
822 if stop is specified, it will consider all the revs from stop
822 if stop is specified, it will consider all the revs from stop
823 as if they had no children
823 as if they had no children
824 """
824 """
825 if start is None and stop is None:
825 if start is None and stop is None:
826 count = len(self)
826 count = len(self)
827 if not count:
827 if not count:
828 return [nullid]
828 return [nullid]
829 ishead = [1] * (count + 1)
829 ishead = [1] * (count + 1)
830 index = self.index
830 index = self.index
831 for r in xrange(count):
831 for r in xrange(count):
832 e = index[r]
832 e = index[r]
833 ishead[e[5]] = ishead[e[6]] = 0
833 ishead[e[5]] = ishead[e[6]] = 0
834 return [self.node(r) for r in xrange(count) if ishead[r]]
834 return [self.node(r) for r in xrange(count) if ishead[r]]
835
835
836 if start is None:
836 if start is None:
837 start = nullid
837 start = nullid
838 if stop is None:
838 if stop is None:
839 stop = []
839 stop = []
840 stoprevs = set([self.rev(n) for n in stop])
840 stoprevs = set([self.rev(n) for n in stop])
841 startrev = self.rev(start)
841 startrev = self.rev(start)
842 reachable = set((startrev,))
842 reachable = set((startrev,))
843 heads = set((startrev,))
843 heads = set((startrev,))
844
844
845 parentrevs = self.parentrevs
845 parentrevs = self.parentrevs
846 for r in xrange(startrev + 1, len(self)):
846 for r in xrange(startrev + 1, len(self)):
847 for p in parentrevs(r):
847 for p in parentrevs(r):
848 if p in reachable:
848 if p in reachable:
849 if r not in stoprevs:
849 if r not in stoprevs:
850 reachable.add(r)
850 reachable.add(r)
851 heads.add(r)
851 heads.add(r)
852 if p in heads and p not in stoprevs:
852 if p in heads and p not in stoprevs:
853 heads.remove(p)
853 heads.remove(p)
854
854
855 return [self.node(r) for r in heads]
855 return [self.node(r) for r in heads]
856
856
857 def children(self, node):
857 def children(self, node):
858 """find the children of a given node"""
858 """find the children of a given node"""
859 c = []
859 c = []
860 p = self.rev(node)
860 p = self.rev(node)
861 for r in range(p + 1, len(self)):
861 for r in range(p + 1, len(self)):
862 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
862 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
863 if prevs:
863 if prevs:
864 for pr in prevs:
864 for pr in prevs:
865 if pr == p:
865 if pr == p:
866 c.append(self.node(r))
866 c.append(self.node(r))
867 elif p == nullrev:
867 elif p == nullrev:
868 c.append(self.node(r))
868 c.append(self.node(r))
869 return c
869 return c
870
870
871 def descendant(self, start, end):
871 def descendant(self, start, end):
872 for i in self.descendants(start):
872 for i in self.descendants(start):
873 if i == end:
873 if i == end:
874 return True
874 return True
875 elif i > end:
875 elif i > end:
876 break
876 break
877 return False
877 return False
878
878
879 def ancestor(self, a, b):
879 def ancestor(self, a, b):
880 """calculate the least common ancestor of nodes a and b"""
880 """calculate the least common ancestor of nodes a and b"""
881
881
882 # fast path, check if it is a descendant
882 # fast path, check if it is a descendant
883 a, b = self.rev(a), self.rev(b)
883 a, b = self.rev(a), self.rev(b)
884 start, end = sorted((a, b))
884 start, end = sorted((a, b))
885 if self.descendant(start, end):
885 if self.descendant(start, end):
886 return self.node(start)
886 return self.node(start)
887
887
888 def parents(rev):
888 def parents(rev):
889 return [p for p in self.parentrevs(rev) if p != nullrev]
889 return [p for p in self.parentrevs(rev) if p != nullrev]
890
890
891 c = ancestor.ancestor(a, b, parents)
891 c = ancestor.ancestor(a, b, parents)
892 if c is None:
892 if c is None:
893 return nullid
893 return nullid
894
894
895 return self.node(c)
895 return self.node(c)
896
896
    def _match(self, id):
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

    def _partialmatch(self, id):
        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                bin_id = bin(id[:l * 2])
                nl = [n for n in self.nodemap if n[:l] == bin_id]
                nl = [n for n in nl if hex(n).startswith(id)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

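lookup() accepts several identifier forms, dispatching to _match() and _partialmatch() above. A hedged usage sketch (rlog and the identifiers are hypothetical):

node = rlog.lookup(5)          # an integer revision number
node = rlog.lookup('-1')       # str(rev); negative values count back from the tip
node = rlog.lookup('d4e5f6')   # an unambiguous hex prefix, via _partialmatch()
node = rlog.lookup(node)       # a 20-byte binary nodeid comes back unchanged

An unknown or ambiguous identifier raises LookupError.
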
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def _addchunk(self, offset, data):
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _prereadsize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _loadchunk(self, offset, length):
        if self._inline:
            df = self.opener(self.indexfile)
        else:
            df = self.opener(self.datafile)

        readahead = max(65536, length)
        df.seek(offset)
        d = df.read(readahead)
        self._addchunk(offset, d)
        if readahead > length:
            return d[:length]
        return d

    def _getchunk(self, offset, length):
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return d[cachestart:cacheend]

        return self._loadchunk(offset, length)

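_addchunk(), _loadchunk() and _getchunk() implement a single-window readahead cache over the data file (or the inline index file). A self-contained sketch of the same idea, independent of revlog; the 64 KiB readahead mirrors the constant used above:

class ChunkCache(object):
    """Cache one contiguous window of a file and serve byte ranges from it,
    falling back to a readahead read on a miss."""

    def __init__(self, fileobj, readahead=65536):
        self._f = fileobj
        self._readahead = readahead
        self._offset, self._data = 0, ''

    def read(self, offset, length):
        start = offset - self._offset
        end = start + length
        if start >= 0 and end <= len(self._data):
            return self._data[start:end]                 # served from cache
        self._f.seek(offset)
        self._data = self._f.read(max(self._readahead, length))
        self._offset = offset
        return self._data[:length]
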
    def _chunkraw(self, startrev, endrev):
        start = self.start(startrev)
        length = self.end(endrev) - start
        if self._inline:
            start += (startrev + 1) * self._io.size
        return self._getchunk(start, length)

    def _chunk(self, rev):
        return decompress(self._chunkraw(rev, rev))

    def _chunkclear(self):
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return previous revision or parentrev according to flags"""
        if self.base(rev) == rev:
            return nullrev
        elif self.flags(rev) & REVIDX_PARENTDELTA:
            return self.parentrevs(rev)[0]
        else:
            return rev - 1


    def deltachain(self, rev, cache):
        """return chain of revisions to construct a given revision"""
        chain = []
        check = False
        while self.base(rev) != rev and rev != cache:
            chain.append(rev)
            rev = self.deltaparent(rev)
        chain.reverse()
        if rev == cache:
            check = True
        return check, rev, chain

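deltaparent() and deltachain() describe how a revision is reached from a stored full text: walk back through delta parents until a base revision (one whose base() is itself) is found, then apply the collected deltas in order. A standalone sketch of that walk; base_of and delta_parent_of stand in for the revlog callbacks and are assumptions of this illustration:

def walk_delta_chain(rev, base_of, delta_parent_of):
    """Return (base, chain): the full-text base revision and the ordered
    list of revisions whose deltas are applied on top of it."""
    chain = []
    while base_of(rev) != rev:
        chain.append(rev)
        rev = delta_parent_of(rev)
    chain.reverse()
    return rev, chain
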
    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return self._chunk(rev2)

        return mdiff.textdiff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        cache = nullrev
        if node == nullid:
            return ""
        if self._cache:
            cache = self._cache[1]
            if self._cache[0] == node:
                return self._cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        cache, base, chain = self.deltachain(rev, cache)

        # check rev flags
        if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

        # do we have useful data cached?
        if cache and self._cache:
            text = self._cache[2]

        # drop cache to save memory
        self._cache = None

        self._loadindex(base, rev + 1)
        self._chunkraw(base, rev)
        if text is None:
            text = self._chunk(base)

        bins = [self._chunk(r) for r in chain]
        text = mdiff.patches(text, bins)
        p1, p2 = self.parents(node)
        if (node != hash(text, p1, p2) and
            not (self.flags(rev) & REVIDX_PUNCHED_FLAG)):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.indexfile, rev))

        self._cache = (node, rev, text)
        return text

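revision() verifies the reconstructed text against the stored nodeid. The hash() it calls is revlog's own helper (defined earlier in this file): the SHA-1 of the two parent nodeids in sorted order followed by the text. A standalone sketch under that assumption:

import hashlib

def nodeid(text, p1, p2):
    """Sketch of revlog's hash(): SHA-1 over the sorted parents plus the text."""
    a, b = sorted([p1, p2])
    s = hashlib.sha1(a)
    s.update(b)
    s.update(text)
    return s.digest()

A mismatch is only tolerated for punched revisions (REVIDX_PUNCHED_FLAG).
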
    def checkinlinesize(self, tr, fp=None):
        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        df = self.opener(self.datafile, 'w')
        try:
            for r in self:
                df.write(self._chunkraw(r, r))
        finally:
            df.close()

        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

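checkinlinesize() splits an inline revlog, where index entries and compressed chunks are interleaved in the .i file, into separate .i and .d files once the data outgrows _maxinline. While the log stays inline, a chunk's file position is its data offset shifted past the index entries in front of it, which is the adjustment _chunkraw() makes above; a one-line sketch (helper name hypothetical):

def inline_chunk_position(data_offset, rev, entry_size):
    """File position of rev's compressed chunk inside an inline index file."""
    return data_offset + (rev + 1) * entry_size
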
    def addrevision(self, text, transaction, link, p1, p2, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a+")
        try:
            return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
        node = hash(text, p1, p2)
        if node in self.nodemap:
            return node

        curr = len(self)
        prev = curr - 1
        base = self.base(prev)
        offset = self.end(prev)
        flags = 0

        if curr:
            if not d:
                if self._parentdelta:
                    ptext = self.revision(p1)
                    flags = REVIDX_PARENTDELTA
                else:
                    ptext = self.revision(self.node(prev))
                d = mdiff.textdiff(ptext, text)
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = l + offset - self.start(base)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        # or the base revision is punched
        if (not curr or dist > len(text) * 2 or
            (self.flags(base) & REVIDX_PUNCHED_FLAG)):
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = curr

        e = (offset_type(offset, flags), l, len(text),
             base, link, self.rev(p1), self.rev(p2), node)
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        if type(text) == str: # only accept immutable objects
            self._cache = (node, curr, text)
        return node

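This hunk is the point of the changeset: previously _addrevision() always diffed the new text against the previous revision (prev) and wrote a zero flag field; with the revlog's _parentdelta switch enabled, the stored delta is now computed against p1 and the entry is tagged with REVIDX_PARENTDELTA, so deltaparent() and revision() above follow the right chain when reconstructing. A condensed sketch of the decision, using the names from the method above, plus how the flag is packed into the index entry via offset_type():

if self._parentdelta:
    ptext = self.revision(p1)                  # delta base is the first parent
    flags = REVIDX_PARENTDELTA                 # recorded per revision in the index
else:
    ptext = self.revision(self.node(prev))     # classic behaviour: delta against tip
    flags = 0
d = mdiff.textdiff(ptext, text)

# offset_type(offset, flags) == (offset << 16) | flags, so for example
# offset_type(4096, REVIDX_PARENTDELTA) == (4096 << 16) | 1
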
    def group(self, nodelist, lookup, infocollect=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. the first delta is
        parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. parent is parent[0]
        """

        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        for d in xrange(len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            if a == -1:
                d = self.revision(nb)
                meta += mdiff.trivialdiffheader(len(d))
            else:
                d = self.revdiff(a, b)
            yield changegroup.chunkheader(len(meta) + len(d))
            yield meta
            yield d

        yield changegroup.closechunk()

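Every item group() yields is framed by changegroup.chunkheader() and terminated by changegroup.closechunk(). As a hedged sketch of that framing (mirroring mercurial.changegroup of this era, which is not part of this file): the header is a 4-byte big-endian length that counts itself, and a zero length closes the group.

import struct

def chunkheader(datalen):
    """Length prefix for one chunk; the 4 header bytes are included."""
    return struct.pack(">l", datalen + 4)

def closechunk():
    """An empty chunk marks the end of the group."""
    return struct.pack(">l", 0)
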
    def addgroup(self, revs, linkmapper, transaction):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        #track the base of the current delta log
        r = len(self)
        t = r - 1
        node = None

        base = prev = nullrev
        start = end = textlen = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        try:
            # loop through our set of deltas
            chain = None
            for chunk in revs:
                node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue
                delta = buffer(chunk, 80)
                del chunk

                for p in (p1, p2):
                    if not p in self.nodemap:
                        raise LookupError(p, self.indexfile, _('unknown parent'))

                if not chain:
                    # retrieve the parent revision of the delta chain
                    chain = p1
                    if not chain in self.nodemap:
                        raise LookupError(chain, self.indexfile, _('unknown base'))

                # full versions are inserted when the needed deltas become
                # comparable to the uncompressed text or when the previous
                # version is not the one we have a delta against. We use
                # the size of the previous full rev as a proxy for the
                # current size.

                if chain == prev:
                    cdelta = compress(delta)
                    cdeltalen = len(cdelta[0]) + len(cdelta[1])
                    textlen = mdiff.patchedsize(textlen, delta)

                if chain != prev or (end - start + cdeltalen) > textlen * 2:
                    # flush our writes here so we can read it in revision
                    if dfh:
                        dfh.flush()
                    ifh.flush()
                    text = self.revision(chain)
                    if len(text) == 0:
                        # skip over trivial delta header
                        text = buffer(delta, 12)
                    else:
                        text = mdiff.patches(text, [delta])
                    del delta
                    chk = self._addrevision(text, transaction, link, p1, p2, None,
                                            ifh, dfh)
                    if not dfh and not self._inline:
                        # addrevision switched from inline to conventional
                        # reopen the index
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                    if chk != node:
                        raise RevlogError(_("consistency error adding group"))
                    textlen = len(text)
                else:
                    e = (offset_type(end, 0), cdeltalen, textlen, base,
                         link, self.rev(p1), self.rev(p2), node)
                    self.index.insert(-1, e)
                    self.nodemap[node] = r
                    entry = self._io.packentry(e, self.node, self.version, r)
                    if self._inline:
                        ifh.write(entry)
                        ifh.write(cdelta[0])
                        ifh.write(cdelta[1])
                        self.checkinlinesize(transaction, ifh)
                        if not self._inline:
                            dfh = self.opener(self.datafile, "a")
                            ifh = self.opener(self.indexfile, "a")
                    else:
                        dfh.write(cdelta[0])
                        dfh.write(cdelta[1])
                        ifh.write(entry)

                t, r, chain, prev = r, r + 1, node, node
                base = self.base(t)
                start = self.start(base)
                end = self.end(t)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return node

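Both addgroup() above and _addrevision() fall back to storing a full text once the bytes needed to reach a revision through deltas would exceed roughly twice its uncompressed size, which keeps reconstruction cost bounded. A tiny sketch of the heuristic (helper name hypothetical):

def needs_fulltext(chain_bytes, text_len):
    """Store a full revision when the accumulated deltas exceed 2x the text."""
    return chain_bytes > 2 * text_len
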
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll readd them after this truncation.
        """
        if len(self) == 0:
            return

        if isinstance(self.index, lazyindex):
            self._loadindexmap()

        for rev in self:
            if self.index[rev][4] >= minlink:
                break
        else:
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res