revlog: deltachain() returns the chain of revs needed to construct a revision
Pradeepkumar Gayam
r11929:1839a751 default
@@ -1,1421 +1,1443 @@
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

# import stuff from node for others to import from revlog
from node import bin, hex, nullid, nullrev, short #@UnusedImport
from i18n import _
import changegroup, ancestor, mdiff, parsers, error, util
import struct, zlib, errno

_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1

# revlog header flags
REVLOGV0 = 0
REVLOGNG = 1
REVLOGNGINLINEDATA = (1 << 16)
REVLOGSHALLOW = (1 << 17)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGSHALLOW

# revlog index flags
REVIDX_PARENTDELTA = 1
REVIDX_PUNCHED_FLAG = 2
REVIDX_KNOWN_FLAGS = REVIDX_PUNCHED_FLAG | REVIDX_PARENTDELTA

# amount of data read unconditionally, should be >= 4
# when not inline: threshold for using lazy index
_prereadsize = 1048576
# max size of revlog with inline data
_maxinline = 131072

RevlogError = error.RevlogError
LookupError = error.LookupError

def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    return long(long(offset) << 16 | type)

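# The first field of an index entry packs the data-file offset and the
# per-revision flags into a single 64-bit value; a quick interactive check
# of the helpers above (illustrative only):
#
#   >>> v = offset_type(1048576, REVIDX_PUNCHED_FLAG)
#   >>> getoffset(v), gettype(v)
#   (1048576, 2)
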
nullhash = _sha(nullid)

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent nodes is null, p2 is null
    if p2 == nullid:
        # deep copy of a hash is faster than creating one
        s = nullhash.copy()
        s.update(p1)
    else:
        # none of the parent nodes are nullid
        l = [p1, p2]
        l.sort()
        s = _sha(l[0])
        s.update(l[1])
    s.update(text)
    return s.digest()

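# For instance, the same text hashed with different parents yields different
# nodeids, which is what lets identical file contents be told apart in the
# revision graph (interactive sketch, illustrative only):
#
#   >>> n1 = hash("contents", nullid, nullid)
#   >>> n2 = hash("contents", n1, nullid)
#   >>> len(n1), n1 != n2
#   (20, True)
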
def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text:
        return ("", text)
    l = len(text)
    bin = None
    if l < 44:
        pass
    elif l > 1000000:
        # zlib makes an internal copy, thus doubling memory usage for
        # large files, so let's do this in pieces
        z = zlib.compressobj()
        p = []
        pos = 0
        while pos < l:
            pos2 = pos + 2**20
            p.append(z.compress(text[pos:pos2]))
            pos = pos2
        p.append(z.flush())
        if sum(map(len, p)) < l:
            bin = "".join(p)
    else:
        bin = _compress(text)
    if bin is None or len(bin) > l:
        if text[0] == '\0':
            return ("", text)
        return ('u', text)
    return ("", bin)

def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    t = bin[0]
    if t == '\0':
        return bin
    if t == 'x':
        return _decompress(bin)
    if t == 'u':
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % t)

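# Round-tripping through these two helpers (illustrative only): zlib-compressed
# chunks start with 'x', chunks stored uncompressed start with 'u' or '\0'.
#
#   >>> h, data = compress("the quick brown fox " * 64)
#   >>> decompress(h + data) == "the quick brown fox " * 64
#   True
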
class lazyparser(object):
    """
    this class avoids the need to parse the entirety of large indices
    """

    # lazyparser is not safe to use on windows if win32 extensions not
    # available. it keeps file handle open, which makes it not possible
    # to break hardlinks on local cloned repos.

    def __init__(self, dataf):
        try:
            size = util.fstat(dataf).st_size
        except AttributeError:
            size = 0
        self.dataf = dataf
        self.s = struct.calcsize(indexformatng)
        self.datasize = size
        self.l = size // self.s
        self.index = [None] * self.l
        self.map = {nullid: nullrev}
        self.allmap = 0
        self.all = 0
        self.mapfind_count = 0

    def loadmap(self):
        """
        during a commit, we need to make sure the rev being added is
        not a duplicate. This requires loading the entire index,
        which is fairly slow. loadmap can load up just the node map,
        which takes much less time.
        """
        if self.allmap:
            return
        end = self.datasize
        self.allmap = 1
        cur = 0
        count = 0
        blocksize = self.s * 256
        self.dataf.seek(0)
        while cur < end:
            data = self.dataf.read(blocksize)
            off = 0
            for x in xrange(256):
                n = data[off + ngshaoffset:off + ngshaoffset + 20]
                self.map[n] = count
                count += 1
                if count >= self.l:
                    break
                off += self.s
            cur += blocksize

    def loadblock(self, blockstart, blocksize, data=None):
        if self.all:
            return
        if data is None:
            self.dataf.seek(blockstart)
            if blockstart + blocksize > self.datasize:
                # the revlog may have grown since we've started running,
                # but we don't have space in self.index for more entries.
                # limit blocksize so that we don't get too much data.
                blocksize = max(self.datasize - blockstart, 0)
            data = self.dataf.read(blocksize)
        lend = len(data) // self.s
        i = blockstart // self.s
        off = 0
        # lazyindex supports __delitem__
        if lend > len(self.index) - i:
            lend = len(self.index) - i
        for x in xrange(lend):
            if self.index[i + x] is None:
                b = data[off : off + self.s]
                self.index[i + x] = b
                n = b[ngshaoffset:ngshaoffset + 20]
                self.map[n] = i + x
            off += self.s

    def findnode(self, node):
        """search backwards through the index file for a specific node"""
        if self.allmap:
            return None

        # hg log will cause many many searches for the manifest
        # nodes. After we get called a few times, just load the whole
        # thing.
        if self.mapfind_count > 8:
            self.loadmap()
            if node in self.map:
                return node
            return None
        self.mapfind_count += 1
        last = self.l - 1
        while self.index[last] != None:
            if last == 0:
                self.all = 1
                self.allmap = 1
                return None
            last -= 1
        end = (last + 1) * self.s
        blocksize = self.s * 256
        while end >= 0:
            start = max(end - blocksize, 0)
            self.dataf.seek(start)
            data = self.dataf.read(end - start)
            findend = end - start
            while True:
                # we're searching backwards, so we have to make sure
                # we don't find a changeset where this node is a parent
                off = data.find(node, 0, findend)
                findend = off
                if off >= 0:
                    i = off / self.s
                    off = i * self.s
                    n = data[off + ngshaoffset:off + ngshaoffset + 20]
                    if n == node:
                        self.map[n] = i + start / self.s
                        return node
                else:
                    break
            end -= blocksize
        return None

    def loadindex(self, i=None, end=None):
        if self.all:
            return
        all = False
        if i is None:
            blockstart = 0
            blocksize = (65536 / self.s) * self.s
            end = self.datasize
            all = True
        else:
            if end:
                blockstart = i * self.s
                end = end * self.s
                blocksize = end - blockstart
            else:
                blockstart = (i & ~1023) * self.s
                blocksize = self.s * 1024
                end = blockstart + blocksize
        while blockstart < end:
            self.loadblock(blockstart, blocksize)
            blockstart += blocksize
        if all:
            self.all = True

class lazyindex(object):
    """a lazy version of the index array"""
    def __init__(self, parser):
        self.p = parser
    def __len__(self):
        return len(self.p.index)
    def load(self, pos):
        if pos < 0:
            pos += len(self.p.index)
        self.p.loadindex(pos)
        return self.p.index[pos]
    def __getitem__(self, pos):
        return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
    def __setitem__(self, pos, item):
        self.p.index[pos] = _pack(indexformatng, *item)
    def __delitem__(self, pos):
        del self.p.index[pos]
    def insert(self, pos, e):
        self.p.index.insert(pos, _pack(indexformatng, *e))
    def append(self, e):
        self.p.index.append(_pack(indexformatng, *e))

class lazymap(object):
    """a lazy version of the node map"""
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        n = self.p.findnode(key)
        if n is None:
            raise KeyError(key)
    def __contains__(self, key):
        if key in self.p.map:
            return True
        self.p.loadmap()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i, ret in enumerate(self.p.index):
            if not ret:
                self.p.loadindex(i)
                ret = self.p.index[i]
            if isinstance(ret, str):
                ret = _unpack(indexformatng, ret)
            yield ret[7]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
    def __delitem__(self, key):
        del self.p.map[key]

indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56

class revlogoldio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, fp, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        if len(data) == _prereadsize:
            data += fp.read() # read the rest
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise RevlogError(_("index entry flags need RevlogNG"))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)

# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32
versionformat = ">I"

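# Each RevlogNG index entry is therefore a fixed 64-byte record (the 20-byte
# nodeid is padded out to 32 bytes), unpacking into eight fields:
# offset/flags, compressed length, uncompressed length, base rev, link rev,
# the two parent revs and the nodeid. A quick check (illustrative only):
#
#   >>> struct.calcsize(indexformatng)
#   64
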
class revlogio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, fp, data, inline):
        if len(data) == _prereadsize:
            if util.openhardlinks() and not inline:
                # big index, let's parse it on demand
                parser = lazyparser(fp)
                index = lazyindex(parser)
                nodemap = lazymap(parser)
                e = list(index[0])
                type = gettype(e[0])
                e[0] = offset_type(0, type)
                index[0] = e
                return index, nodemap, None
            else:
                data += fp.read()

        # call the C implementation to parse the index data
        index, nodemap, cache = parsers.parse_index(data, inline)
        return index, nodemap, cache

    def packentry(self, entry, node, version, rev):
        p = _pack(indexformatng, *entry)
        if rev == 0:
            p = _pack(versionformat, version) + p[4:]
        return p

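# The revlog version header is not stored separately: packentry() stamps it
# over the first four bytes of entry 0 (whose offset is always 0 anyway),
# which is why parseindex() above resets the offset portion of index[0].
# Illustrative check only:
#
#   >>> io = revlogio()
#   >>> e = (offset_type(0, 0), 10, 12, 0, 0, -1, -1, "\0" * 20)
#   >>> packed = io.packentry(e, nullid, REVLOG_DEFAULT_VERSION, 0)
#   >>> _unpack(versionformat, packed[:4])[0] == REVLOG_DEFAULT_VERSION
#   True
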
class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile, shallowroot=None):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        self._cache = None
        self._chunkcache = (0, '')
        self.nodemap = {nullid: nullrev}
        self.index = []
        self._shallowroot = shallowroot
        self._parentdelta = 0

        v = REVLOG_DEFAULT_VERSION
        if hasattr(opener, 'options') and 'defversion' in opener.options:
            v = opener.options['defversion']
            if v & REVLOGNG:
                v |= REVLOGNGINLINEDATA
            if v & REVLOGNG and 'parentdelta' in opener.options:
                self._parentdelta = 1

        if shallowroot:
            v |= REVLOGSHALLOW

        i = ''
        try:
            f = self.opener(self.indexfile)
            if "nonlazy" in getattr(self.opener, 'options', {}):
                i = f.read()
            else:
                i = f.read(_prereadsize)
            if len(i) > 0:
                v = struct.unpack(versionformat, i[:4])[0]
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        self._shallow = v & REVLOGSHALLOW
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        if i:
            try:
                d = self._io.parseindex(f, i, self._inline)
            except (ValueError, IndexError):
                raise RevlogError(_("index %s is corrupted") % (self.indexfile))
            self.index, self.nodemap, self._chunkcache = d
            if not self._chunkcache:
                self._chunkclear()

        # add the magic null revision at -1 (if it hasn't been done already)
        if (self.index == [] or isinstance(self.index, lazyindex) or
            self.index[-1][7] != nullid):
            self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))

    def _loadindex(self, start, end):
        """load a block of indexes all at once from the lazy parser"""
        if isinstance(self.index, lazyindex):
            self.index.p.loadindex(start, end)

    def _loadindexmap(self):
        """loads both the map and the index from the lazy parser"""
        if isinstance(self.index, lazyindex):
            p = self.index.p
            p.loadindex()
            self.nodemap = p.map

    def _loadmap(self):
        """loads the map from the lazy parser"""
        if isinstance(self.nodemap, lazymap):
            self.nodemap.p.loadmap()
            self.nodemap = self.nodemap.p.map

    def tip(self):
        return self.node(len(self.index) - 2)
    def __len__(self):
        return len(self.index) - 1
    def __iter__(self):
        for i in xrange(len(self)):
            yield i
    def rev(self, node):
        try:
            return self.nodemap[node]
        except KeyError:
            raise LookupError(node, self.indexfile, _('no node'))
    def node(self, rev):
        return self.index[rev][7]
    def linkrev(self, rev):
        return self.index[rev][4]
    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
    def parentrevs(self, rev):
        return self.index[rev][5:7]
    def start(self, rev):
        return int(self.index[rev][0] >> 16)
    def end(self, rev):
        return self.start(rev) + self.length(rev)
    def length(self, rev):
        return self.index[rev][1]
    def base(self, rev):
        return self.index[rev][3]
    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF

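    # Note that the in-memory index always carries the sentinel null-revision
    # entry at the end, so len(rl) is len(rl.index) - 1 and, for any revlog
    # instance rl and valid rev r, the accessors above satisfy (illustrative
    # only):
    #
    #   rl.tip() == rl.node(len(rl) - 1)
    #   rl.end(r) == rl.start(r) + rl.length(r)
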
    def size(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(self.node(rev))
        return len(t)

    def reachable(self, node, stop=None):
        """return the set of all nodes ancestral to a given node, including
        the node itself, stopping when stop is matched"""
        reachable = set((node,))
        visit = [node]
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable.add(p)
                    visit.append(p)
        return reachable

    def ancestors(self, *revs):
        """Generate the ancestors of 'revs' in reverse topological order.

        Yield a sequence of revision numbers starting with the parents
        of each revision in revs, i.e., each revision is *not* considered
        an ancestor of itself. Results are in breadth-first order:
        parents of each rev in revs, then parents of those, etc. Result
        does not include the null revision."""
        visit = list(revs)
        seen = set([nullrev])
        while visit:
            for parent in self.parentrevs(visit.pop(0)):
                if parent not in seen:
                    visit.append(parent)
                    seen.add(parent)
                    yield parent

    def descendants(self, *revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        seen = set(revs)
        for i in xrange(min(revs) + 1, len(self)):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

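    # For a linear history 0 <- 1 <- 2 the two generators above are mirror
    # images of each other (illustrative only):
    #
    #   list(rl.ancestors(2))   == [1, 0]
    #   list(rl.descendants(0)) == [1, 2]
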
    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        has = set(self.ancestors(*common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = [r for r in heads if r not in has]
        while visit:
            r = visit.pop(0)
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return [self.node(r) for r in missing]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, 0)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendents = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == nullrev: # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents. (We seeded the descendents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents.add(n)
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            count = len(self)
            if not count:
                return [nullid]
            ishead = [1] * (count + 1)
            index = self.index
            for r in xrange(count):
                e = index[r]
                ishead[e[5]] = ishead[e[6]] = 0
            return [self.node(r) for r in xrange(count) if ishead[r]]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = set((startrev,))
        heads = set((startrev,))

        parentrevs = self.parentrevs
        for r in xrange(startrev + 1, len(self)):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, len(self)):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def descendant(self, start, end):
        for i in self.descendants(start):
            if i == end:
                return True
            elif i > end:
                break
        return False

    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        # fast path, check if it is a descendant
        a, b = self.rev(a), self.rev(b)
        start, end = sorted((a, b))
        if self.descendant(start, end):
            return self.node(start)

        def parents(rev):
            return [p for p in self.parentrevs(rev) if p != nullrev]

        c = ancestor.ancestor(a, b, parents)
        if c is None:
            return nullid

        return self.node(c)

    def _match(self, id):
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

    def _partialmatch(self, id):
        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                bin_id = bin(id[:l * 2])
                nl = [n for n in self.nodemap if n[:l] == bin_id]
                nl = [n for n in nl if hex(n).startswith(id)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                pass

948 def lookup(self, id):
948 def lookup(self, id):
949 """locate a node based on:
949 """locate a node based on:
950 - revision number or str(revision number)
950 - revision number or str(revision number)
951 - nodeid or subset of hex nodeid
951 - nodeid or subset of hex nodeid
952 """
952 """
953 n = self._match(id)
953 n = self._match(id)
954 if n is not None:
954 if n is not None:
955 return n
955 return n
956 n = self._partialmatch(id)
956 n = self._partialmatch(id)
957 if n:
957 if n:
958 return n
958 return n
959
959
960 raise LookupError(id, self.indexfile, _('no match found'))
960 raise LookupError(id, self.indexfile, _('no match found'))
961
961
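For readers following _match() and _partialmatch(), the sketch below replays the same resolution order over a plain list of fake hex ids instead of a real revlog; toy_lookup and nodes are made up for illustration, and the binary-nodeid branch is omitted.

nodes = ['11' * 20, '2f' * 20, '2e' * 20]           # fake 40-char hex nodeids

def toy_lookup(id):
    try:                                            # 1. str(revision number)
        rev = int(id)
        if str(rev) == id and 0 <= rev < len(nodes):
            return nodes[rev]
    except ValueError:
        pass
    if len(id) == 40 and id in nodes:               # 2. full hex nodeid
        return id
    matches = [n for n in nodes if n.startswith(id)]
    if len(matches) == 1:                           # 3. unambiguous prefix
        return matches[0]
    raise LookupError('ambiguous or unknown identifier: %r' % id)

print(toy_lookup('0'))     # resolved as a revision number
print(toy_lookup('2f'))    # resolved by prefix
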
962 def cmp(self, node, text):
962 def cmp(self, node, text):
963 """compare text with a given file revision
963 """compare text with a given file revision
964
964
965 returns True if text is different than what is stored.
965 returns True if text is different than what is stored.
966 """
966 """
967 p1, p2 = self.parents(node)
967 p1, p2 = self.parents(node)
968 return hash(text, p1, p2) != node
968 return hash(text, p1, p2) != node
969
969
970 def _addchunk(self, offset, data):
970 def _addchunk(self, offset, data):
971 o, d = self._chunkcache
971 o, d = self._chunkcache
972 # try to add to existing cache
972 # try to add to existing cache
973 if o + len(d) == offset and len(d) + len(data) < _prereadsize:
973 if o + len(d) == offset and len(d) + len(data) < _prereadsize:
974 self._chunkcache = o, d + data
974 self._chunkcache = o, d + data
975 else:
975 else:
976 self._chunkcache = offset, data
976 self._chunkcache = offset, data
977
977
978 def _loadchunk(self, offset, length):
978 def _loadchunk(self, offset, length):
979 if self._inline:
979 if self._inline:
980 df = self.opener(self.indexfile)
980 df = self.opener(self.indexfile)
981 else:
981 else:
982 df = self.opener(self.datafile)
982 df = self.opener(self.datafile)
983
983
984 readahead = max(65536, length)
984 readahead = max(65536, length)
985 df.seek(offset)
985 df.seek(offset)
986 d = df.read(readahead)
986 d = df.read(readahead)
987 self._addchunk(offset, d)
987 self._addchunk(offset, d)
988 if readahead > length:
988 if readahead > length:
989 return d[:length]
989 return d[:length]
990 return d
990 return d
991
991
992 def _getchunk(self, offset, length):
992 def _getchunk(self, offset, length):
993 o, d = self._chunkcache
993 o, d = self._chunkcache
994 l = len(d)
994 l = len(d)
995
995
996 # is it in the cache?
996 # is it in the cache?
997 cachestart = offset - o
997 cachestart = offset - o
998 cacheend = cachestart + length
998 cacheend = cachestart + length
999 if cachestart >= 0 and cacheend <= l:
999 if cachestart >= 0 and cacheend <= l:
1000 if cachestart == 0 and cacheend == l:
1000 if cachestart == 0 and cacheend == l:
1001 return d # avoid a copy
1001 return d # avoid a copy
1002 return d[cachestart:cacheend]
1002 return d[cachestart:cacheend]
1003
1003
1004 return self._loadchunk(offset, length)
1004 return self._loadchunk(offset, length)
1005
1005
1006 def _chunkraw(self, startrev, endrev):
1006 def _chunkraw(self, startrev, endrev):
1007 start = self.start(startrev)
1007 start = self.start(startrev)
1008 length = self.end(endrev) - start
1008 length = self.end(endrev) - start
1009 if self._inline:
1009 if self._inline:
1010 start += (startrev + 1) * self._io.size
1010 start += (startrev + 1) * self._io.size
1011 return self._getchunk(start, length)
1011 return self._getchunk(start, length)
1012
1012
1013 def _chunk(self, rev):
1013 def _chunk(self, rev):
1014 return decompress(self._chunkraw(rev, rev))
1014 return decompress(self._chunkraw(rev, rev))
1015
1015
1016 def _chunkclear(self):
1016 def _chunkclear(self):
1017 self._chunkcache = (0, '')
1017 self._chunkcache = (0, '')
1018
1018
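The three methods above implement a simple (offset, data) read cache with readahead. A rough standalone sketch of the same idea over an ordinary file object follows; ChunkCache and its attributes are invented names, and unlike _addchunk() this version never extends an existing cache entry.

class ChunkCache(object):
    def __init__(self, fileobj, readahead=65536):
        self._f = fileobj
        self._readahead = readahead
        self._cache = (0, '')                  # (offset, data)

    def read(self, offset, length):
        o, d = self._cache
        start = offset - o
        end = start + length
        if start >= 0 and end <= len(d):
            return d[start:end]                # served from cache, no I/O
        self._f.seek(offset)
        data = self._f.read(max(self._readahead, length))
        self._cache = (offset, data)           # replace the cache wholesale
        return data[:length]
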
1019 def deltaparent(self, rev):
1020 """return previous revision or parentrev according to flags"""
1021 if self.base(rev) == rev:
1022 return nullrev
1023 elif self.flags(rev) & REVIDX_PARENTDELTA:
1024 return self.parentrevs(rev)[0]
1025 else:
1026 return rev - 1
1027
1028
1029 def deltachain(self, rev, cache):
1030 """return chain of revisions to construct a given revision"""
1031 chain = []
1032 check = False
1033 while self.base(rev) != rev and rev != cache:
1034 chain.append(rev)
1035 rev = self.deltaparent(rev)
1036 chain.reverse()
1037 if rev == cache:
1038 check = True
1039 return check, rev, chain
1040
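The new deltachain() walks backwards from rev through deltaparent() until it reaches a base revision or the cached revision, then reports where reconstruction should start. A self-contained toy version of the same walk is below; the base and deltaparent dictionaries merely stand in for the revlog index and are not real data.

base = {0: 0, 1: 0, 2: 0, 3: 3, 4: 3}          # snapshot ("base") rev per rev
deltaparent = {1: 0, 2: 1, 4: 3}               # delta parent of each non-base rev

def toy_deltachain(rev, cache=None):
    chain = []
    while base[rev] != rev and rev != cache:
        chain.append(rev)
        rev = deltaparent[rev]
    chain.reverse()
    return rev == cache, rev, chain

print(toy_deltachain(2))      # (False, 0, [1, 2]): rebuild from snapshot rev 0
print(toy_deltachain(2, 1))   # (True, 1, [2]):     the cached rev 1 is enough
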
1019 def revdiff(self, rev1, rev2):
1041 def revdiff(self, rev1, rev2):
1020 """return or calculate a delta between two revisions"""
1042 """return or calculate a delta between two revisions"""
1021 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
1043 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
1022 return self._chunk(rev2)
1044 return self._chunk(rev2)
1023
1045
1024 return mdiff.textdiff(self.revision(self.node(rev1)),
1046 return mdiff.textdiff(self.revision(self.node(rev1)),
1025 self.revision(self.node(rev2)))
1047 self.revision(self.node(rev2)))
1026
1048
1027 def revision(self, node):
1049 def revision(self, node):
1028 """return an uncompressed revision of a given node"""
1050 """return an uncompressed revision of a given node"""
1029 if node == nullid:
1051 if node == nullid:
1030 return ""
1052 return ""
1031 if self._cache and self._cache[0] == node:
1053 if self._cache and self._cache[0] == node:
1032 return self._cache[2]
1054 return self._cache[2]
1033
1055
1034 # look up what we need to read
1056 # look up what we need to read
1035 text = None
1057 text = None
1036 rev = self.rev(node)
1058 rev = self.rev(node)
1037 base = self.base(rev)
1059 base = self.base(rev)
1038
1060
1039 # check rev flags
1061 # check rev flags
1040 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
1062 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
1041 raise RevlogError(_('incompatible revision flag %x') %
1063 raise RevlogError(_('incompatible revision flag %x') %
1042 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
1064 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
1043
1065
1044 # do we have useful data cached?
1066 # do we have useful data cached?
1045 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
1067 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
1046 base = self._cache[1]
1068 base = self._cache[1]
1047 text = self._cache[2]
1069 text = self._cache[2]
1048
1070
1049 # drop cache to save memory
1071 # drop cache to save memory
1050 self._cache = None
1072 self._cache = None
1051
1073
1052 self._loadindex(base, rev + 1)
1074 self._loadindex(base, rev + 1)
1053 self._chunkraw(base, rev)
1075 self._chunkraw(base, rev)
1054 if text is None:
1076 if text is None:
1055 text = self._chunk(base)
1077 text = self._chunk(base)
1056
1078
1057 bins = [self._chunk(r) for r in xrange(base + 1, rev + 1)]
1079 bins = [self._chunk(r) for r in xrange(base + 1, rev + 1)]
1058 text = mdiff.patches(text, bins)
1080 text = mdiff.patches(text, bins)
1059 p1, p2 = self.parents(node)
1081 p1, p2 = self.parents(node)
1060 if (node != hash(text, p1, p2) and
1082 if (node != hash(text, p1, p2) and
1061 not (self.flags(rev) & REVIDX_PUNCHED_FLAG)):
1083 not (self.flags(rev) & REVIDX_PUNCHED_FLAG)):
1062 raise RevlogError(_("integrity check failed on %s:%d")
1084 raise RevlogError(_("integrity check failed on %s:%d")
1063 % (self.indexfile, rev))
1085 % (self.indexfile, rev))
1064
1086
1065 self._cache = (node, rev, text)
1087 self._cache = (node, rev, text)
1066 return text
1088 return text
1067
1089
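revision() finishes by recomputing the node id over the reconstructed text and both parents; a mismatch (unless the revision is punched) means corruption. The sketch below shows that check in isolation, using hashlib instead of revlog's hash()/util.sha1 helpers, on the assumption that a node id is the SHA-1 of the sorted parent nodes followed by the text.

import hashlib

def nodeid(text, p1, p2):
    lo, hi = (p1, p2) if p1 < p2 else (p2, p1)   # parents in sorted order
    s = hashlib.sha1(lo + hi)
    s.update(text)
    return s.digest()

nullid = b'\x00' * 20
text = b'hello revlog\n'
node = nodeid(text, nullid, nullid)
assert nodeid(text, nullid, nullid) == node             # intact
assert nodeid(b'tampered\n', nullid, nullid) != node    # would fail the check
print('integrity check sketch passed')
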
1068 def checkinlinesize(self, tr, fp=None):
1090 def checkinlinesize(self, tr, fp=None):
1069 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
1091 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
1070 return
1092 return
1071
1093
1072 trinfo = tr.find(self.indexfile)
1094 trinfo = tr.find(self.indexfile)
1073 if trinfo is None:
1095 if trinfo is None:
1074 raise RevlogError(_("%s not found in the transaction")
1096 raise RevlogError(_("%s not found in the transaction")
1075 % self.indexfile)
1097 % self.indexfile)
1076
1098
1077 trindex = trinfo[2]
1099 trindex = trinfo[2]
1078 dataoff = self.start(trindex)
1100 dataoff = self.start(trindex)
1079
1101
1080 tr.add(self.datafile, dataoff)
1102 tr.add(self.datafile, dataoff)
1081
1103
1082 if fp:
1104 if fp:
1083 fp.flush()
1105 fp.flush()
1084 fp.close()
1106 fp.close()
1085
1107
1086 df = self.opener(self.datafile, 'w')
1108 df = self.opener(self.datafile, 'w')
1087 try:
1109 try:
1088 for r in self:
1110 for r in self:
1089 df.write(self._chunkraw(r, r))
1111 df.write(self._chunkraw(r, r))
1090 finally:
1112 finally:
1091 df.close()
1113 df.close()
1092
1114
1093 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1115 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1094 self.version &= ~(REVLOGNGINLINEDATA)
1116 self.version &= ~(REVLOGNGINLINEDATA)
1095 self._inline = False
1117 self._inline = False
1096 for i in self:
1118 for i in self:
1097 e = self._io.packentry(self.index[i], self.node, self.version, i)
1119 e = self._io.packentry(self.index[i], self.node, self.version, i)
1098 fp.write(e)
1120 fp.write(e)
1099
1121
1100 # if we don't call rename, the temp file will never replace the
1122 # if we don't call rename, the temp file will never replace the
1101 # real index
1123 # real index
1102 fp.rename()
1124 fp.rename()
1103
1125
1104 tr.replace(self.indexfile, trindex * self._io.size)
1126 tr.replace(self.indexfile, trindex * self._io.size)
1105 self._chunkclear()
1127 self._chunkclear()
1106
1128
1107 def addrevision(self, text, transaction, link, p1, p2, d=None):
1129 def addrevision(self, text, transaction, link, p1, p2, d=None):
1108 """add a revision to the log
1130 """add a revision to the log
1109
1131
1110 text - the revision data to add
1132 text - the revision data to add
1111 transaction - the transaction object used for rollback
1133 transaction - the transaction object used for rollback
1112 link - the linkrev data to add
1134 link - the linkrev data to add
1113 p1, p2 - the parent nodeids of the revision
1135 p1, p2 - the parent nodeids of the revision
1114 d - an optional precomputed delta
1136 d - an optional precomputed delta
1115 """
1137 """
1116 dfh = None
1138 dfh = None
1117 if not self._inline:
1139 if not self._inline:
1118 dfh = self.opener(self.datafile, "a")
1140 dfh = self.opener(self.datafile, "a")
1119 ifh = self.opener(self.indexfile, "a+")
1141 ifh = self.opener(self.indexfile, "a+")
1120 try:
1142 try:
1121 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1143 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1122 finally:
1144 finally:
1123 if dfh:
1145 if dfh:
1124 dfh.close()
1146 dfh.close()
1125 ifh.close()
1147 ifh.close()
1126
1148
1127 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1149 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1128 node = hash(text, p1, p2)
1150 node = hash(text, p1, p2)
1129 if node in self.nodemap:
1151 if node in self.nodemap:
1130 return node
1152 return node
1131
1153
1132 curr = len(self)
1154 curr = len(self)
1133 prev = curr - 1
1155 prev = curr - 1
1134 base = self.base(prev)
1156 base = self.base(prev)
1135 offset = self.end(prev)
1157 offset = self.end(prev)
1136
1158
1137 if curr:
1159 if curr:
1138 if not d:
1160 if not d:
1139 ptext = self.revision(self.node(prev))
1161 ptext = self.revision(self.node(prev))
1140 d = mdiff.textdiff(ptext, text)
1162 d = mdiff.textdiff(ptext, text)
1141 data = compress(d)
1163 data = compress(d)
1142 l = len(data[1]) + len(data[0])
1164 l = len(data[1]) + len(data[0])
1143 dist = l + offset - self.start(base)
1165 dist = l + offset - self.start(base)
1144
1166
1145 # full versions are inserted when the needed deltas
1167 # full versions are inserted when the needed deltas
1146 # become comparable to the uncompressed text
1168 # become comparable to the uncompressed text
1147 # or the base revision is punched
1169 # or the base revision is punched
1148 if (not curr or dist > len(text) * 2 or
1170 if (not curr or dist > len(text) * 2 or
1149 (self.flags(base) & REVIDX_PUNCHED_FLAG)):
1171 (self.flags(base) & REVIDX_PUNCHED_FLAG)):
1150 data = compress(text)
1172 data = compress(text)
1151 l = len(data[1]) + len(data[0])
1173 l = len(data[1]) + len(data[0])
1152 base = curr
1174 base = curr
1153
1175
1154 e = (offset_type(offset, 0), l, len(text),
1176 e = (offset_type(offset, 0), l, len(text),
1155 base, link, self.rev(p1), self.rev(p2), node)
1177 base, link, self.rev(p1), self.rev(p2), node)
1156 self.index.insert(-1, e)
1178 self.index.insert(-1, e)
1157 self.nodemap[node] = curr
1179 self.nodemap[node] = curr
1158
1180
1159 entry = self._io.packentry(e, self.node, self.version, curr)
1181 entry = self._io.packentry(e, self.node, self.version, curr)
1160 if not self._inline:
1182 if not self._inline:
1161 transaction.add(self.datafile, offset)
1183 transaction.add(self.datafile, offset)
1162 transaction.add(self.indexfile, curr * len(entry))
1184 transaction.add(self.indexfile, curr * len(entry))
1163 if data[0]:
1185 if data[0]:
1164 dfh.write(data[0])
1186 dfh.write(data[0])
1165 dfh.write(data[1])
1187 dfh.write(data[1])
1166 dfh.flush()
1188 dfh.flush()
1167 ifh.write(entry)
1189 ifh.write(entry)
1168 else:
1190 else:
1169 offset += curr * self._io.size
1191 offset += curr * self._io.size
1170 transaction.add(self.indexfile, offset, curr)
1192 transaction.add(self.indexfile, offset, curr)
1171 ifh.write(entry)
1193 ifh.write(entry)
1172 ifh.write(data[0])
1194 ifh.write(data[0])
1173 ifh.write(data[1])
1195 ifh.write(data[1])
1174 self.checkinlinesize(transaction, ifh)
1196 self.checkinlinesize(transaction, ifh)
1175
1197
1176 if type(text) == str: # only accept immutable objects
1198 if type(text) == str: # only accept immutable objects
1177 self._cache = (node, curr, text)
1199 self._cache = (node, curr, text)
1178 return node
1200 return node
1179
1201
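The comment in _addrevision() explains that a full version is stored once the delta chain grows too expensive. The toy helper below (should_snapshot is an invented name) spells out that threshold: snapshot when the bytes needed to rebuild the revision would exceed roughly twice its uncompressed size.

def should_snapshot(chainbytes, deltalen, textlen):
    # chainbytes: compressed bytes already stored since the last full
    # version; deltalen: size of the new compressed delta; textlen:
    # uncompressed size of the revision being added
    return chainbytes + deltalen > 2 * textlen

print(should_snapshot(chainbytes=300, deltalen=40, textlen=1000))    # False: keep the delta
print(should_snapshot(chainbytes=1900, deltalen=200, textlen=1000))  # True: store a full text
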
1180 def group(self, nodelist, lookup, infocollect=None):
1202 def group(self, nodelist, lookup, infocollect=None):
1181 """Calculate a delta group, yielding a sequence of changegroup chunks
1203 """Calculate a delta group, yielding a sequence of changegroup chunks
1182 (strings).
1204 (strings).
1183
1205
1184 Given a list of changeset revs, return a set of deltas and
1206 Given a list of changeset revs, return a set of deltas and
1185 metadata corresponding to nodes. the first delta is
1207 metadata corresponding to nodes. the first delta is
1186 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
1208 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
1187 have this parent as it has all history before these
1209 have this parent as it has all history before these
1188 changesets. parent is parent[0]
1210 changesets. parent is parent[0]
1189 """
1211 """
1190
1212
1191 revs = [self.rev(n) for n in nodelist]
1213 revs = [self.rev(n) for n in nodelist]
1192
1214
1193 # if we don't have any revisions touched by these changesets, bail
1215 # if we don't have any revisions touched by these changesets, bail
1194 if not revs:
1216 if not revs:
1195 yield changegroup.closechunk()
1217 yield changegroup.closechunk()
1196 return
1218 return
1197
1219
1198 # add the parent of the first rev
1220 # add the parent of the first rev
1199 p = self.parentrevs(revs[0])[0]
1221 p = self.parentrevs(revs[0])[0]
1200 revs.insert(0, p)
1222 revs.insert(0, p)
1201
1223
1202 # build deltas
1224 # build deltas
1203 for d in xrange(len(revs) - 1):
1225 for d in xrange(len(revs) - 1):
1204 a, b = revs[d], revs[d + 1]
1226 a, b = revs[d], revs[d + 1]
1205 nb = self.node(b)
1227 nb = self.node(b)
1206
1228
1207 if infocollect is not None:
1229 if infocollect is not None:
1208 infocollect(nb)
1230 infocollect(nb)
1209
1231
1210 p = self.parents(nb)
1232 p = self.parents(nb)
1211 meta = nb + p[0] + p[1] + lookup(nb)
1233 meta = nb + p[0] + p[1] + lookup(nb)
1212 if a == -1:
1234 if a == -1:
1213 d = self.revision(nb)
1235 d = self.revision(nb)
1214 meta += mdiff.trivialdiffheader(len(d))
1236 meta += mdiff.trivialdiffheader(len(d))
1215 else:
1237 else:
1216 d = self.revdiff(a, b)
1238 d = self.revdiff(a, b)
1217 yield changegroup.chunkheader(len(meta) + len(d))
1239 yield changegroup.chunkheader(len(meta) + len(d))
1218 yield meta
1240 yield meta
1219 yield d
1241 yield d
1220
1242
1221 yield changegroup.closechunk()
1243 yield changegroup.closechunk()
1222
1244
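group() emits each delta as a length-prefixed chunk and ends the stream with a zero-length chunk. The sketch below reproduces that framing as understood here (a 4-byte big-endian length that counts the length field itself); it is not a copy of changegroup.chunkheader()/closechunk().

import struct

def chunkheader(datalen):
    return struct.pack(">l", datalen + 4)    # length includes the 4-byte header

def closechunk():
    return struct.pack(">l", 0)              # zero length terminates the group

payload = b'node + p1 + p2 + linknode metadata, then the delta'
stream = chunkheader(len(payload)) + payload + closechunk()
print(len(stream) == 4 + len(payload) + 4)   # True
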
1223 def addgroup(self, revs, linkmapper, transaction):
1245 def addgroup(self, revs, linkmapper, transaction):
1224 """
1246 """
1225 add a delta group
1247 add a delta group
1226
1248
1227 given a set of deltas, add them to the revision log. the
1249 given a set of deltas, add them to the revision log. the
1228 first delta is against its parent, which should be in our
1250 first delta is against its parent, which should be in our
1229 log, the rest are against the previous delta.
1251 log, the rest are against the previous delta.
1230 """
1252 """
1231
1253
1232 #track the base of the current delta log
1254 #track the base of the current delta log
1233 r = len(self)
1255 r = len(self)
1234 t = r - 1
1256 t = r - 1
1235 node = None
1257 node = None
1236
1258
1237 base = prev = nullrev
1259 base = prev = nullrev
1238 start = end = textlen = 0
1260 start = end = textlen = 0
1239 if r:
1261 if r:
1240 end = self.end(t)
1262 end = self.end(t)
1241
1263
1242 ifh = self.opener(self.indexfile, "a+")
1264 ifh = self.opener(self.indexfile, "a+")
1243 isize = r * self._io.size
1265 isize = r * self._io.size
1244 if self._inline:
1266 if self._inline:
1245 transaction.add(self.indexfile, end + isize, r)
1267 transaction.add(self.indexfile, end + isize, r)
1246 dfh = None
1268 dfh = None
1247 else:
1269 else:
1248 transaction.add(self.indexfile, isize, r)
1270 transaction.add(self.indexfile, isize, r)
1249 transaction.add(self.datafile, end)
1271 transaction.add(self.datafile, end)
1250 dfh = self.opener(self.datafile, "a")
1272 dfh = self.opener(self.datafile, "a")
1251
1273
1252 try:
1274 try:
1253 # loop through our set of deltas
1275 # loop through our set of deltas
1254 chain = None
1276 chain = None
1255 for chunk in revs:
1277 for chunk in revs:
1256 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1278 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1257 link = linkmapper(cs)
1279 link = linkmapper(cs)
1258 if node in self.nodemap:
1280 if node in self.nodemap:
1259 # this can happen if two branches make the same change
1281 # this can happen if two branches make the same change
1260 chain = node
1282 chain = node
1261 continue
1283 continue
1262 delta = buffer(chunk, 80)
1284 delta = buffer(chunk, 80)
1263 del chunk
1285 del chunk
1264
1286
1265 for p in (p1, p2):
1287 for p in (p1, p2):
1266 if not p in self.nodemap:
1288 if not p in self.nodemap:
1267 raise LookupError(p, self.indexfile, _('unknown parent'))
1289 raise LookupError(p, self.indexfile, _('unknown parent'))
1268
1290
1269 if not chain:
1291 if not chain:
1270 # retrieve the parent revision of the delta chain
1292 # retrieve the parent revision of the delta chain
1271 chain = p1
1293 chain = p1
1272 if not chain in self.nodemap:
1294 if not chain in self.nodemap:
1273 raise LookupError(chain, self.indexfile, _('unknown base'))
1295 raise LookupError(chain, self.indexfile, _('unknown base'))
1274
1296
1275 # full versions are inserted when the needed deltas become
1297 # full versions are inserted when the needed deltas become
1276 # comparable to the uncompressed text or when the previous
1298 # comparable to the uncompressed text or when the previous
1277 # version is not the one we have a delta against. We use
1299 # version is not the one we have a delta against. We use
1278 # the size of the previous full rev as a proxy for the
1300 # the size of the previous full rev as a proxy for the
1279 # current size.
1301 # current size.
1280
1302
1281 if chain == prev:
1303 if chain == prev:
1282 cdelta = compress(delta)
1304 cdelta = compress(delta)
1283 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1305 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1284 textlen = mdiff.patchedsize(textlen, delta)
1306 textlen = mdiff.patchedsize(textlen, delta)
1285
1307
1286 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1308 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1287 # flush our writes here so we can read it in revision
1309 # flush our writes here so we can read it in revision
1288 if dfh:
1310 if dfh:
1289 dfh.flush()
1311 dfh.flush()
1290 ifh.flush()
1312 ifh.flush()
1291 text = self.revision(chain)
1313 text = self.revision(chain)
1292 if len(text) == 0:
1314 if len(text) == 0:
1293 # skip over trivial delta header
1315 # skip over trivial delta header
1294 text = buffer(delta, 12)
1316 text = buffer(delta, 12)
1295 else:
1317 else:
1296 text = mdiff.patches(text, [delta])
1318 text = mdiff.patches(text, [delta])
1297 del delta
1319 del delta
1298 chk = self._addrevision(text, transaction, link, p1, p2, None,
1320 chk = self._addrevision(text, transaction, link, p1, p2, None,
1299 ifh, dfh)
1321 ifh, dfh)
1300 if not dfh and not self._inline:
1322 if not dfh and not self._inline:
1301 # addrevision switched from inline to conventional
1323 # addrevision switched from inline to conventional
1302 # reopen the index
1324 # reopen the index
1303 dfh = self.opener(self.datafile, "a")
1325 dfh = self.opener(self.datafile, "a")
1304 ifh = self.opener(self.indexfile, "a")
1326 ifh = self.opener(self.indexfile, "a")
1305 if chk != node:
1327 if chk != node:
1306 raise RevlogError(_("consistency error adding group"))
1328 raise RevlogError(_("consistency error adding group"))
1307 textlen = len(text)
1329 textlen = len(text)
1308 else:
1330 else:
1309 e = (offset_type(end, 0), cdeltalen, textlen, base,
1331 e = (offset_type(end, 0), cdeltalen, textlen, base,
1310 link, self.rev(p1), self.rev(p2), node)
1332 link, self.rev(p1), self.rev(p2), node)
1311 self.index.insert(-1, e)
1333 self.index.insert(-1, e)
1312 self.nodemap[node] = r
1334 self.nodemap[node] = r
1313 entry = self._io.packentry(e, self.node, self.version, r)
1335 entry = self._io.packentry(e, self.node, self.version, r)
1314 if self._inline:
1336 if self._inline:
1315 ifh.write(entry)
1337 ifh.write(entry)
1316 ifh.write(cdelta[0])
1338 ifh.write(cdelta[0])
1317 ifh.write(cdelta[1])
1339 ifh.write(cdelta[1])
1318 self.checkinlinesize(transaction, ifh)
1340 self.checkinlinesize(transaction, ifh)
1319 if not self._inline:
1341 if not self._inline:
1320 dfh = self.opener(self.datafile, "a")
1342 dfh = self.opener(self.datafile, "a")
1321 ifh = self.opener(self.indexfile, "a")
1343 ifh = self.opener(self.indexfile, "a")
1322 else:
1344 else:
1323 dfh.write(cdelta[0])
1345 dfh.write(cdelta[0])
1324 dfh.write(cdelta[1])
1346 dfh.write(cdelta[1])
1325 ifh.write(entry)
1347 ifh.write(entry)
1326
1348
1327 t, r, chain, prev = r, r + 1, node, node
1349 t, r, chain, prev = r, r + 1, node, node
1328 base = self.base(t)
1350 base = self.base(t)
1329 start = self.start(base)
1351 start = self.start(base)
1330 end = self.end(t)
1352 end = self.end(t)
1331 finally:
1353 finally:
1332 if dfh:
1354 if dfh:
1333 dfh.close()
1355 dfh.close()
1334 ifh.close()
1356 ifh.close()
1335
1357
1336 return node
1358 return node
1337
1359
1338 def strip(self, minlink, transaction):
1360 def strip(self, minlink, transaction):
1339 """truncate the revlog on the first revision with a linkrev >= minlink
1361 """truncate the revlog on the first revision with a linkrev >= minlink
1340
1362
1341 This function is called when we're stripping revision minlink and
1363 This function is called when we're stripping revision minlink and
1342 its descendants from the repository.
1364 its descendants from the repository.
1343
1365
1344 We have to remove all revisions with linkrev >= minlink, because
1366 We have to remove all revisions with linkrev >= minlink, because
1345 the equivalent changelog revisions will be renumbered after the
1367 the equivalent changelog revisions will be renumbered after the
1346 strip.
1368 strip.
1347
1369
1348 So we truncate the revlog on the first of these revisions, and
1370 So we truncate the revlog on the first of these revisions, and
1349 trust that the caller has saved the revisions that shouldn't be
1371 trust that the caller has saved the revisions that shouldn't be
1350 removed and that it'll readd them after this truncation.
1372 removed and that it'll readd them after this truncation.
1351 """
1373 """
1352 if len(self) == 0:
1374 if len(self) == 0:
1353 return
1375 return
1354
1376
1355 if isinstance(self.index, lazyindex):
1377 if isinstance(self.index, lazyindex):
1356 self._loadindexmap()
1378 self._loadindexmap()
1357
1379
1358 for rev in self:
1380 for rev in self:
1359 if self.index[rev][4] >= minlink:
1381 if self.index[rev][4] >= minlink:
1360 break
1382 break
1361 else:
1383 else:
1362 return
1384 return
1363
1385
1364 # first truncate the files on disk
1386 # first truncate the files on disk
1365 end = self.start(rev)
1387 end = self.start(rev)
1366 if not self._inline:
1388 if not self._inline:
1367 transaction.add(self.datafile, end)
1389 transaction.add(self.datafile, end)
1368 end = rev * self._io.size
1390 end = rev * self._io.size
1369 else:
1391 else:
1370 end += rev * self._io.size
1392 end += rev * self._io.size
1371
1393
1372 transaction.add(self.indexfile, end)
1394 transaction.add(self.indexfile, end)
1373
1395
1374 # then reset internal state in memory to forget those revisions
1396 # then reset internal state in memory to forget those revisions
1375 self._cache = None
1397 self._cache = None
1376 self._chunkclear()
1398 self._chunkclear()
1377 for x in xrange(rev, len(self)):
1399 for x in xrange(rev, len(self)):
1378 del self.nodemap[self.node(x)]
1400 del self.nodemap[self.node(x)]
1379
1401
1380 del self.index[rev:-1]
1402 del self.index[rev:-1]
1381
1403
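strip() truncates the index and data files at different offsets depending on whether the revlog is inline. The toy function below restates that arithmetic; truncation_points, datastart and entrysize are invented names, with entrysize playing the role of self._io.size.

def truncation_points(rev, datastart, entrysize, inline):
    # datastart: self.start(rev), the data offset of the first stripped rev
    if inline:
        # index entries and data are interleaved in the index file
        return datastart + rev * entrysize, None
    return rev * entrysize, datastart        # (index bytes kept, data bytes kept)

print(truncation_points(rev=5, datastart=4096, entrysize=64, inline=False))
# (320, 4096): keep five 64-byte index entries and 4096 data bytes
print(truncation_points(rev=5, datastart=4096, entrysize=64, inline=True))
# (4416, None): one file holds both, so both terms add up
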
1382 def checksize(self):
1404 def checksize(self):
1383 expected = 0
1405 expected = 0
1384 if len(self):
1406 if len(self):
1385 expected = max(0, self.end(len(self) - 1))
1407 expected = max(0, self.end(len(self) - 1))
1386
1408
1387 try:
1409 try:
1388 f = self.opener(self.datafile)
1410 f = self.opener(self.datafile)
1389 f.seek(0, 2)
1411 f.seek(0, 2)
1390 actual = f.tell()
1412 actual = f.tell()
1391 dd = actual - expected
1413 dd = actual - expected
1392 except IOError, inst:
1414 except IOError, inst:
1393 if inst.errno != errno.ENOENT:
1415 if inst.errno != errno.ENOENT:
1394 raise
1416 raise
1395 dd = 0
1417 dd = 0
1396
1418
1397 try:
1419 try:
1398 f = self.opener(self.indexfile)
1420 f = self.opener(self.indexfile)
1399 f.seek(0, 2)
1421 f.seek(0, 2)
1400 actual = f.tell()
1422 actual = f.tell()
1401 s = self._io.size
1423 s = self._io.size
1402 i = max(0, actual // s)
1424 i = max(0, actual // s)
1403 di = actual - (i * s)
1425 di = actual - (i * s)
1404 if self._inline:
1426 if self._inline:
1405 databytes = 0
1427 databytes = 0
1406 for r in self:
1428 for r in self:
1407 databytes += max(0, self.length(r))
1429 databytes += max(0, self.length(r))
1408 dd = 0
1430 dd = 0
1409 di = actual - len(self) * s - databytes
1431 di = actual - len(self) * s - databytes
1410 except IOError, inst:
1432 except IOError, inst:
1411 if inst.errno != errno.ENOENT:
1433 if inst.errno != errno.ENOENT:
1412 raise
1434 raise
1413 di = 0
1435 di = 0
1414
1436
1415 return (dd, di)
1437 return (dd, di)
1416
1438
1417 def files(self):
1439 def files(self):
1418 res = [self.indexfile]
1440 res = [self.indexfile]
1419 if not self._inline:
1441 if not self._inline:
1420 res.append(self.datafile)
1442 res.append(self.datafile)
1421 return res
1443 return res