revlog: add flags argument to _addrevision, update callers use default flags...
Mike Edgar - r23856:062c3ad8 default
@@ -1,1537 +1,1537 @@
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

# import stuff from node for others to import from revlog
from node import bin, hex, nullid, nullrev
from i18n import _
import ancestor, mdiff, parsers, error, util, templatefilters
import struct, zlib, errno

_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1

# revlog header flags
REVLOGV0 = 0
REVLOGNG = 1
REVLOGNGINLINEDATA = (1 << 16)
REVLOGGENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA

# revlog index flags
REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
REVIDX_DEFAULT_FLAGS = 0
REVIDX_KNOWN_FLAGS = REVIDX_ISCENSORED

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

RevlogError = error.RevlogError
LookupError = error.LookupError
CensoredNodeError = error.CensoredNodeError

def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    return long(long(offset) << 16 | type)
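
# Editor's illustration (not part of the original file): the first field
# of a RevlogNG index entry packs the data offset and the 16-bit flags
# into a single integer, using the helpers above.
assert getoffset(offset_type(4096, 1)) == 4096
assert gettype(offset_type(4096, 1)) == 1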

_nullhash = _sha(nullid)

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent nodes is null, p2 is null
    if p2 == nullid:
        # deep copy of a hash is faster than creating one
        s = _nullhash.copy()
        s.update(p1)
    else:
        # none of the parent nodes are nullid
        l = [p1, p2]
        l.sort()
        s = _sha(l[0])
        s.update(l[1])
    s.update(text)
    return s.digest()
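
# Editor's illustration (not part of the original file; the parent
# values below are made up): because the two parents are sorted before
# hashing, the node id does not depend on parent order when both
# parents are real, and the digest is always 20 bytes of SHA-1.
_pa, _pb = '\x01' * 20, '\x02' * 20
assert hash('text', _pa, _pb) == hash('text', _pb, _pa)
assert len(hash('text', _pa, _pb)) == 20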

def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    t = bin[0]
    if t == '\0':
        return bin
    if t == 'x':
        try:
            return _decompress(bin)
        except zlib.error, e:
            raise RevlogError(_("revlog decompress error: %s") % str(e))
    if t == 'u':
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % t)
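
# Editor's illustration (not part of the original file): the first byte
# of a stored chunk selects the encoding: 'x' marks zlib data (zlib
# streams start with 0x78 == 'x'), 'u' marks an uncompressed chunk
# stored with a one-byte prefix, and '\0'/empty chunks pass through.
assert decompress(_compress('hello, revlog')) == 'hello, revlog'
assert decompress('uhello') == 'hello'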

# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56
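
# Editor's illustration (not part of the original file): a v0 entry is
# 76 bytes (four 4-byte ints plus three 20-byte nodeids), and the
# revision's own nodeid starts 56 bytes in, matching v0shaoffset.
assert struct.calcsize(indexformatv0) == 76
assert 4 * 4 + 2 * 20 == v0shaoffset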

class revlogoldio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise RevlogError(_("index entry flags need RevlogNG"))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)

# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32
versionformat = ">I"
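
# Editor's illustration (not part of the original file): a RevlogNG
# entry is a fixed 64 bytes (the 20-byte nodeid is padded to 32 bytes
# by the "12x"), and the nodeid field starts at byte 32, matching
# ngshaoffset.
assert struct.calcsize(indexformatng) == 64
assert 6 + 2 + 6 * 4 == ngshaoffset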

class revlogio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache

    def packentry(self, entry, node, version, rev):
        p = _pack(indexformatng, *entry)
        if rev == 0:
            p = _pack(versionformat, version) + p[4:]
        return p

class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        self._cache = None
        self._basecache = None
        self._chunkcache = (0, '')
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self.index = []
        self._pcache = {}
        self._nodecache = {nullid: nullrev}
        self._nodepos = None

        v = REVLOG_DEFAULT_VERSION
        opts = getattr(opener, 'options', None)
        if opts is not None:
            if 'revlogv1' in opts:
                if 'generaldelta' in opts:
                    v |= REVLOGGENERALDELTA
            else:
                v = 0
            if 'chunkcachesize' in opts:
                self._chunkcachesize = opts['chunkcachesize']
            if 'maxchainlen' in opts:
                self._maxchainlen = opts['maxchainlen']

        if self._chunkcachesize <= 0:
            raise RevlogError(_('revlog chunk cache size %r is not greater '
                                'than 0') % self._chunkcachesize)
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise RevlogError(_('revlog chunk cache size %r is not a power '
                                'of 2') % self._chunkcachesize)
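
        # Editor's note (not in the original file): "x & (x - 1)" clears
        # the lowest set bit, so it is zero exactly when x is a power of
        # two; that guarantee keeps the cache-window mask arithmetic in
        # _loadchunk below valid.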

        i = ''
        self._initempty = True
        try:
            f = self.opener(self.indexfile)
            i = f.read()
            f.close()
            if len(i) > 0:
                v = struct.unpack(versionformat, i[:4])[0]
                self._initempty = False
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        self._generaldelta = v & REVLOGGENERALDELTA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(i, self._inline)
        except (ValueError, IndexError):
            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = {}

    def tip(self):
        return self.node(len(self.index) - 2)
    def __len__(self):
        return len(self.index) - 1
    def __iter__(self):
        return iter(xrange(len(self)))
    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        step = 1
        if stop is not None:
            if start > stop:
                step = -1
            stop += step
        else:
            stop = len(self)
        return xrange(start, stop, step)
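
    # Editor's note (not in the original file): unlike xrange, revs()
    # treats 'stop' as inclusive and walks backwards when start > stop,
    # e.g. revs(start=5, stop=2) yields 5, 4, 3, 2.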

    @util.propertycache
    def nodemap(self):
        self.rev(self.node(0))
        return self._nodecache

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def clearcaches(self):
        try:
            self._nodecache.clearcaches()
        except AttributeError:
            self._nodecache = {nullid: nullrev}
            self._nodepos = None

    def rev(self, node):
        try:
            return self._nodecache[node]
        except TypeError:
            raise
        except RevlogError:
            # parsers.c radix tree lookup failed
            raise LookupError(node, self.indexfile, _('no node'))
        except KeyError:
            # pure python cache lookup failed
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 2
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            raise LookupError(node, self.indexfile, _('no node'))

    def node(self, rev):
        return self.index[rev][7]
    def linkrev(self, rev):
        return self.index[rev][4]
    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
    def parentrevs(self, rev):
        return self.index[rev][5:7]
    def start(self, rev):
        return int(self.index[rev][0] >> 16)
    def end(self, rev):
        return self.start(rev) + self.length(rev)
    def length(self, rev):
        return self.index[rev][1]
    def chainbase(self, rev):
        index = self.index
        base = index[rev][3]
        while base != rev:
            rev = base
            base = index[rev][3]
        return base
    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r
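
    # Editor's note (not in the original file): _chaininfo returns
    # (number of deltas to apply, total compressed bytes to read,
    # including the chain's base text), so chainlen(rev) measures the
    # cost in delta applications of reconstructing that revision.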

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF
    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(self.node(rev))
        return len(t)
    size = rawsize

    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse topological order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
                                      inclusive=inclusive)

    def descendants(self, revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if not r in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = util.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(r) for r in missing]

    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev: # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def headrevs(self):
        try:
            return self.index.headrevs()
        except AttributeError:
            return self._headrevs()

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iter over filtered revs so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1 # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
        return [r for r, val in enumerate(ishead) if val]

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = set((startrev,))
        heads = set((startrev,))

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]
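
    # Editor's note (not in the original file): with no arguments this
    # is just the node form of headrevs(); passing 'start' restricts
    # the walk to descendants of that node, and nodes in 'stop' are
    # treated as childless cut-off points.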

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def descendant(self, start, end):
        if start == nullrev:
            return True
        for i in self.descendants([start]):
            if i == end:
                return True
            elif i > end:
                break
        return False

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.commonancestorsheads(a, b)
        except (AttributeError, OverflowError): # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
        return map(self.node, ancs)

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        The implementation of this is trivial but the use of
        commonancestorsheads is not."""
        return a in self.commonancestorsheads(a, b)

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

    def _partialmatch(self, id):
        try:
            n = self.index.partialmatch(id)
            if n and self.hasnode(n):
                return n
            return None
        except RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fall through to slow path that filters hidden revisions
            pass
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def _addchunk(self, offset, data):
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _loadchunk(self, offset, length):
        if self._inline:
            df = self.opener(self.indexfile)
        else:
            df = self.opener(self.datafile)

        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
        df.seek(realoffset)
        d = df.read(reallength)
        df.close()
        self._addchunk(realoffset, d)
        if offset != realoffset or reallength != length:
            return util.buffer(d, offset - realoffset, length)
        return d
944
944
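    # Worked example of the windowing above, assuming the default
    # _chunkcachesize of 65536 bytes (always a power of two):
    #
    #   offset, length = 70000, 100
    #   realoffset = 70000 & ~(65536 - 1)                      # -> 65536
    #   reallength = ((70100 + 65536) & ~(65536 - 1)) - 65536  # -> 65536
    #
    # so one aligned 64k window is read and cached, and a util.buffer
    # viewing just the 100 requested bytes is returned without copying.
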
    def _getchunk(self, offset, length):
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._loadchunk(offset, length)

    def _chunkraw(self, startrev, endrev):
        start = self.start(startrev)
        end = self.end(endrev)
        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start
        return self._getchunk(start, length)

    def _chunk(self, rev):
        return decompress(self._chunkraw(rev, rev))

    def _chunks(self, revs):
        '''faster version of [self._chunk(rev) for rev in revs]

        Assumes that revs is in ascending order.'''
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        # preload the cache
        try:
            while True:
                # ensure that the cache doesn't change out from under us
                _cache = self._chunkcache
                self._chunkraw(revs[0], revs[-1])
                if _cache == self._chunkcache:
                    break
            offset, data = _cache
        except OverflowError:
            # issue4215 - we can't cache a run of chunks greater than
            # 2G on Windows
            return [self._chunk(rev) for rev in revs]

        for rev in revs:
            chunkstart = start(rev)
            if inline:
                chunkstart += (rev + 1) * iosize
            chunklength = length(rev)
            ladd(decompress(buffer(data, chunkstart - offset, chunklength)))

        return l

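    # Sketch: for an ascending run of revs the cache is primed once with
    # _chunkraw(revs[0], revs[-1]), after which each compressed chunk is
    # sliced out of the single cached buffer, e.g. (illustrative only):
    #
    #   chunks = rl._chunks([10, 11, 12])  # one read, three slices
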
    def _chunkclear(self):
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

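    # Example: with generaldelta the delta parent is whatever revision is
    # recorded in index[rev][3]; in the classic layout it is implicitly
    # rev - 1. In both cases a base equal to rev marks a full snapshot:
    #
    #   rl.deltaparent(r) == nullrev  # r is stored as full text
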
    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return str(self._chunk(rev2))

        return mdiff.textdiff(self.revision(rev1),
                              self.revision(rev2))

    def revision(self, nodeorrev):
        """return an uncompressed revision of a given node or revision
        number.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        _cache = self._cache # grab local copy of cache to avoid thread race
        cachedrev = None
        if node == nullid:
            return ""
        if _cache:
            if _cache[0] == node:
                return _cache[2]
            cachedrev = _cache[1]

        # look up what we need to read
        text = None
        if rev is None:
            rev = self.rev(node)

        # check rev flags
        if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

        # build delta chain
        chain = []
        index = self.index # for performance
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != cachedrev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == cachedrev:
            # cache hit
            text = _cache[2]
        else:
            chain.append(iterrev)
        chain.reverse()

        # drop cache to save memory
        self._cache = None

        bins = self._chunks(chain)
        if text is None:
            text = str(bins[0])
            bins = bins[1:]

        text = mdiff.patches(text, bins)

        text = self._checkhash(text, node, rev)

        self._cache = (node, rev, text)
        return text

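    # Worked sketch of the chain walk above, assuming a generaldelta
    # revlog where rev 7 deltas against 5, rev 5 against 2, and rev 2 is
    # a full snapshot: the loop collects chain = [7, 5], the cache-miss
    # branch appends 2, reverse() yields [2, 5, 7], and mdiff.patches()
    # rebuilds rev 7 from the snapshot plus the two deltas.
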
    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return hash(text, p1, p2)

    def _checkhash(self, text, node, rev):
        p1, p2 = self.parents(node)
        self.checkhash(text, p1, p2, node, rev)
        return text

    def checkhash(self, text, p1, p2, node, rev=None):
        if node != self.hash(text, p1, p2):
            revornode = rev
            if revornode is None:
                revornode = templatefilters.short(hex(node))
            raise RevlogError(_("integrity check failed on %s:%s")
                % (self.indexfile, revornode))

    def checkinlinesize(self, tr, fp=None):
        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        df = self.opener(self.datafile, 'w')
        try:
            for r in self:
                df.write(self._chunkraw(r, r))
        finally:
            df.close()

        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call close, the temp file will never replace the
        # real index
        fp.close()

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

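    # Note: _maxinline is 131072 bytes, so an inline revlog is migrated to
    # a separate .d data file roughly once its data outgrows 128k; until
    # then calls like the following are cheap no-ops (sketch only):
    #
    #   rl.checkinlinesize(tr)
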
    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
                    node=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2), however subclasses might
            use a different hashing method (and override checkhash() in that
            case)
        """
        if link == nullrev:
            raise RevlogError(_("attempted to add linkrev -1 to %s")
                              % self.indexfile)
        node = node or self.hash(text, p1, p2)
        if node in self.nodemap:
            return node

        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a+")
        try:
            return self._addrevision(node, text, transaction, link, p1, p2,
                                     REVIDX_DEFAULT_FLAGS, cachedelta, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

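    # Hypothetical caller, for illustration only ('repo', 'rl' and the
    # transaction handling are assumed, not defined in this module):
    #
    #   tr = repo.transaction('example')
    #   node = rl.addrevision(text, tr, linkrev, p1node, p2node)
    #   tr.close()
    #
    # Adding a (text, p1, p2) triple whose hash already sits in the
    # nodemap is a no-op that returns the existing node.
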
    def compress(self, text):
        """ generate a possibly-compressed representation of text """
        if not text:
            return ("", text)
        l = len(text)
        bin = None
        if l < 44:
            pass
        elif l > 1000000:
            # zlib makes an internal copy, thus doubling memory usage for
            # large files, so let's do this in pieces
            z = zlib.compressobj()
            p = []
            pos = 0
            while pos < l:
                pos2 = pos + 2**20
                p.append(z.compress(text[pos:pos2]))
                pos = pos2
            p.append(z.flush())
            if sum(map(len, p)) < l:
                bin = "".join(p)
        else:
            bin = _compress(text)
        if bin is None or len(bin) > l:
            if text[0] == '\0':
                return ("", text)
            return ('u', text)
        return ("", bin)

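    # The three possible return shapes, as a sketch:
    #
    #   ("", text)  - empty text, or incompressible text whose first byte
    #                 is '\0' (safe to store raw)
    #   ('u', text) - incompressible text prefixed with 'u' so its first
    #                 byte cannot be mistaken for compressed data
    #   ("", bin)   - zlib-compressed payload (smaller than the input)
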
    def _addrevision(self, node, text, transaction, link, p1, p2, flags,
                     cachedelta, ifh, dfh):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.
        invariants:
        - text is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        btext = [text]
        def buildtext():
            if btext[0] is not None:
                return btext[0]
            # flush any pending writes here so we can read it in revision
            if dfh:
                dfh.flush()
            ifh.flush()
            basetext = self.revision(self.node(cachedelta[0]))
            btext[0] = mdiff.patch(basetext, cachedelta[1])
            try:
                self.checkhash(btext[0], p1, p2, node)
            except CensoredNodeError:
                pass # always import a censor tombstone.
            return btext[0]

        def builddelta(rev):
            # can we use the cached delta?
            if cachedelta and cachedelta[0] == rev:
                delta = cachedelta[1]
            else:
                t = buildtext()
                ptext = self.revision(self.node(rev))
                delta = mdiff.textdiff(ptext, t)
            data = self.compress(delta)
            l = len(data[1]) + len(data[0])
            if basecache[0] == rev:
                chainbase = basecache[1]
            else:
                chainbase = self.chainbase(rev)
            dist = l + offset - self.start(chainbase)
            if self._generaldelta:
                base = rev
            else:
                base = chainbase
            chainlen, compresseddeltalen = self._chaininfo(rev)
            chainlen += 1
            compresseddeltalen += l
            return dist, l, data, base, chainbase, chainlen, compresseddeltalen

        curr = len(self)
        prev = curr - 1
        base = chainbase = curr
        chainlen = None
        offset = self.end(prev)
        d = None
        if self._basecache is None:
            self._basecache = (prev, self.chainbase(prev))
        basecache = self._basecache
        p1r, p2r = self.rev(p1), self.rev(p2)

        # should we try to build a delta?
        if prev != nullrev:
            if self._generaldelta:
                if p1r >= basecache[1]:
                    d = builddelta(p1r)
                elif p2r >= basecache[1]:
                    d = builddelta(p2r)
                else:
                    d = builddelta(prev)
            else:
                d = builddelta(prev)
            dist, l, data, base, chainbase, chainlen, compresseddeltalen = d

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if text is None:
            textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
                                        cachedelta[1])
        else:
            textlen = len(text)

        # - 'dist' is the distance from the base revision -- bounding it limits
        #   the amount of I/O we need to do.
        # - 'compresseddeltalen' is the sum of the total size of deltas we need
        #   to apply -- bounding it limits the amount of CPU we consume.
        if (d is None or dist > textlen * 4 or l > textlen or
            compresseddeltalen > textlen * 2 or
            (self._maxchainlen and chainlen > self._maxchainlen)):
            text = buildtext()
            data = self.compress(text)
            l = len(data[1]) + len(data[0])
            base = chainbase = curr

        e = (offset_type(offset, flags), l, textlen,
             base, link, p1r, p2r, node)
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        self._writeentry(transaction, ifh, dfh, entry, data, link, offset)

        if type(text) == str: # only accept immutable objects
            self._cache = (node, curr, text)
        self._basecache = (curr, chainbase)
        return node

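    # With the flags parameter added above, callers choose the index flags
    # and offset_type() packs them into the entry's low 16 bits, e.g.:
    #
    #   offset_type(0, REVIDX_ISCENSORED) & 0xFFFF == REVIDX_ISCENSORED
    #
    # Passing REVIDX_DEFAULT_FLAGS (0) preserves the behaviour of the
    # hard-coded 'flags = 0' this change replaces.
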
    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        curr = len(self) - 1
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

    def addgroup(self, bundle, linkmapper, transaction):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        # track the base of the current delta log
        content = []
        node = None

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        try:
            # loop through our set of deltas
            chain = None
            while True:
                chunkdata = bundle.deltachunk(chain)
                if not chunkdata:
                    break
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']

                content.append(node)

                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)
                chain = self._addrevision(node, None, transaction, link,
                                          p1, p2, REVIDX_DEFAULT_FLAGS,
                                          (baserev, delta), ifh, dfh)
                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return content

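    # Sketch of the per-chunk contract assumed above: bundle.deltachunk()
    # returns a false value when the group is exhausted, otherwise a dict
    # shaped like
    #
    #   {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
    #    'deltabase': base, 'delta': patchdata}
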
    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        brokenrevs = set()
        strippoint = len(self)

        heads = {}
        futurelargelinkrevs = set()
        for head in self.headrevs():
            headlinkrev = self.linkrev(head)
            heads[head] = headlinkrev
            if headlinkrev >= minlink:
                futurelargelinkrevs.add(headlinkrev)

        # This algorithm involves walking down the rev graph, starting at the
        # heads. Since the revs are topologically sorted according to linkrev,
        # once all head linkrevs are below the minlink, we know there are
        # no more revs that could have a linkrev greater than minlink.
        # So we can stop walking.
        while futurelargelinkrevs:
            strippoint -= 1
            linkrev = heads.pop(strippoint)

            if linkrev < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelargelinkrevs.remove(linkrev)

            for p in self.parentrevs(strippoint):
                if p != nullrev:
                    plinkrev = self.linkrev(p)
                    heads[p] = plinkrev
                    if plinkrev >= minlink:
                        futurelargelinkrevs.add(plinkrev)

        return strippoint, brokenrevs

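    # Worked example: a revlog with root 0 and two heads 1 and 2, linkrevs
    # [0, 2, 1], and minlink = 2. The walk pops rev 2 first (linkrev 1 <
    # minlink, so it joins brokenrevs), then rev 1 (linkrev 2, removed from
    # the pending set), giving strippoint 1 and brokenrevs {2}: rev 2 is
    # stripped as collateral even though its own linkrev survives the cutoff.
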
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chaininfocache = {}
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

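    # Note: the in-memory index keeps a sentinel entry at position -1
    # (which is why _addrevision inserts with index.insert(-1, e)), so
    # 'del self.index[rev:-1]' drops revisions rev..len(self)-1 while
    # leaving the sentinel in place.
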
    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res